From fa82d1a979d65683db336ad24cf6cfe6a97a25c5 Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Wed, 15 Jun 2022 01:35:17 -0400 Subject: [PATCH 001/180] [Automation] Update elastic stack version to 8.4.0-40cff009 for testing (#557) Co-authored-by: apmmachine --- testing/environments/snapshot.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/testing/environments/snapshot.yml b/testing/environments/snapshot.yml index 9a64bdc5d95..a1753983e81 100644 --- a/testing/environments/snapshot.yml +++ b/testing/environments/snapshot.yml @@ -3,7 +3,7 @@ version: '2.3' services: elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:8.4.0-7e67f5d9-SNAPSHOT + image: docker.elastic.co/elasticsearch/elasticsearch:8.4.0-40cff009-SNAPSHOT # When extend is used it merges healthcheck.tests, see: # https://github.com/docker/compose/issues/8962 # healthcheck: @@ -42,7 +42,7 @@ services: - ./docker/logstash/pki:/etc/pki:ro kibana: - image: docker.elastic.co/kibana/kibana:8.4.0-7e67f5d9-SNAPSHOT + image: docker.elastic.co/kibana/kibana:8.4.0-40cff009-SNAPSHOT environment: - "ELASTICSEARCH_USERNAME=kibana_system_user" - "ELASTICSEARCH_PASSWORD=testing" From 9e5bb1568c018f41e10b870494b220924b5c20d0 Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Thu, 16 Jun 2022 01:44:50 -0400 Subject: [PATCH 002/180] [Automation] Update elastic stack version to 8.4.0-5e6770b1 for testing (#564) Co-authored-by: apmmachine --- testing/environments/snapshot.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/testing/environments/snapshot.yml b/testing/environments/snapshot.yml index a1753983e81..3e0b7d1eb16 100644 --- a/testing/environments/snapshot.yml +++ b/testing/environments/snapshot.yml @@ -3,7 +3,7 @@ version: '2.3' services: elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:8.4.0-40cff009-SNAPSHOT + image: docker.elastic.co/elasticsearch/elasticsearch:8.4.0-5e6770b1-SNAPSHOT # When extend is used it merges healthcheck.tests, see: # https://github.com/docker/compose/issues/8962 # healthcheck: @@ -42,7 +42,7 @@ services: - ./docker/logstash/pki:/etc/pki:ro kibana: - image: docker.elastic.co/kibana/kibana:8.4.0-40cff009-SNAPSHOT + image: docker.elastic.co/kibana/kibana:8.4.0-5e6770b1-SNAPSHOT environment: - "ELASTICSEARCH_USERNAME=kibana_system_user" - "ELASTICSEARCH_PASSWORD=testing" From 5f66839fc488c7c25376a983de70485ccccb1282 Mon Sep 17 00:00:00 2001 From: Pier-Hugues Pellerin Date: Thu, 16 Jun 2022 09:49:36 -0400 Subject: [PATCH 003/180] Fix regression and use comma separated values (#560) Fix regression from https://github.com/elastic/elastic-agent/pull/509 --- .ci/Jenkinsfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.ci/Jenkinsfile b/.ci/Jenkinsfile index bcacd5e1be3..be1a3065c8d 100644 --- a/.ci/Jenkinsfile +++ b/.ci/Jenkinsfile @@ -219,7 +219,7 @@ pipeline { axes { axis { name 'K8S_VERSION' - values "v1.24.0, v1.23.6, v1.22.9, v1.21.12" + values "v1.24.0", "v1.23.6", "v1.22.9", "v1.21.12" } } stages { From 06ee1dd37d45749c96db749cfab3679d4ec11f21 Mon Sep 17 00:00:00 2001 From: Pier-Hugues Pellerin Date: Thu, 16 Jun 2022 16:56:36 -0400 Subject: [PATCH 004/180] Change in Jenkinsfile will trigger k8s run (#568) --- .ci/Jenkinsfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.ci/Jenkinsfile b/.ci/Jenkinsfile index be1a3065c8d..10b0b97a572 100644 --- a/.ci/Jenkinsfile +++ 
b/.ci/Jenkinsfile @@ -50,7 +50,7 @@ pipeline { dir("${BASE_DIR}"){ setEnvVar('ONLY_DOCS', isGitRegionMatch(patterns: [ '.*\\.(asciidoc|md)' ], shouldMatchAll: true).toString()) setEnvVar('PACKAGING_CHANGES', isGitRegionMatch(patterns: [ '(^dev-tools/packaging/.*|.ci/Jenkinsfile)' ], shouldMatchAll: false).toString()) - setEnvVar('K8S_CHANGES', isGitRegionMatch(patterns: [ '(^deploy/kubernetes/.*|^version/docs/version.asciidoc)' ], shouldMatchAll: false).toString()) + setEnvVar('K8S_CHANGES', isGitRegionMatch(patterns: [ '(^deploy/kubernetes/.*|^version/docs/version.asciidoc|.ci/Jenkinsfile)' ], shouldMatchAll: false).toString()) } } } From 86ca6f773ffd16464b32d0650307e8832e0671a1 Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Fri, 17 Jun 2022 01:41:07 -0400 Subject: [PATCH 005/180] [Automation] Update elastic stack version to 8.4.0-da5a1c6d for testing (#573) Co-authored-by: apmmachine --- testing/environments/snapshot.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/testing/environments/snapshot.yml b/testing/environments/snapshot.yml index 3e0b7d1eb16..f067998ade9 100644 --- a/testing/environments/snapshot.yml +++ b/testing/environments/snapshot.yml @@ -3,7 +3,7 @@ version: '2.3' services: elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:8.4.0-5e6770b1-SNAPSHOT + image: docker.elastic.co/elasticsearch/elasticsearch:8.4.0-da5a1c6d-SNAPSHOT # When extend is used it merges healthcheck.tests, see: # https://github.com/docker/compose/issues/8962 # healthcheck: @@ -42,7 +42,7 @@ services: - ./docker/logstash/pki:/etc/pki:ro kibana: - image: docker.elastic.co/kibana/kibana:8.4.0-5e6770b1-SNAPSHOT + image: docker.elastic.co/kibana/kibana:8.4.0-da5a1c6d-SNAPSHOT environment: - "ELASTICSEARCH_USERNAME=kibana_system_user" - "ELASTICSEARCH_PASSWORD=testing" From 7b29dbc22fe3f8c0a4ffdf8b4ef9cbb8a4f40313 Mon Sep 17 00:00:00 2001 From: Denis Rechkunov Date: Fri, 17 Jun 2022 10:40:47 +0200 Subject: [PATCH 006/180] Add `@metadata.input_id` and `@metadata.stream_id` when injecting streams (#527) These 2 value are going to be used in the shipper to identify where an event came from in order to apply processors accordingly. Also, added test cases for the processor to verify the change and updated test cases with the new processor. --- CHANGELOG.next.asciidoc | 1 + .../testdata/audit_config-auditbeat.yml | 20 ++++ .../testdata/logstash_config-metricbeat.yml | 4 + .../program/testdata/namespace-metricbeat.yml | 6 +- .../testdata/single_config-metricbeat.yml | 4 + .../generated/namespace.metricbeat.golden.yml | 4 + .../single_config.filebeat.golden.yml | 8 ++ .../single_config.metricbeat.golden.yml | 4 + internal/pkg/agent/transpiler/rules.go | 53 +++++++++ internal/pkg/agent/transpiler/rules_test.go | 108 ++++++++++++++++++ 10 files changed, 211 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 2c3e563cf21..c911f1bedaf 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -183,3 +183,4 @@ - Save the agent configuration and the state encrypted on the disk. {issue}535[535] {pull}398[398] - Bump node.js version for heartbeat/synthetics to 16.15.0 - Support scheduled actions and cancellation of pending actions. 
{issue}393[393] {pull}419[419] +- Add `@metadata.input_id` and `@metadata.stream_id` when applying the inject stream processor {pull}527[527] diff --git a/internal/pkg/agent/program/testdata/audit_config-auditbeat.yml b/internal/pkg/agent/program/testdata/audit_config-auditbeat.yml index 7ea962de31a..0ebac6eff69 100644 --- a/internal/pkg/agent/program/testdata/audit_config-auditbeat.yml +++ b/internal/pkg/agent/program/testdata/audit_config-auditbeat.yml @@ -24,6 +24,10 @@ auditbeat: fields: dataset: auditd_manager.auditd target: event + - add_fields: + fields: + stream_id: audit/auditd-auditd_manager.auditd-6d35f37c-2243-4229-9fba-ccf39305b536 + target: '@metadata' - add_fields: fields: id: agent-id @@ -59,6 +63,10 @@ auditbeat: fields: dataset: auditd_manager.file_integrity target: event + - add_fields: + fields: + stream_id: fim_1 + target: '@metadata' - add_fields: fields: id: agent-id @@ -89,6 +97,10 @@ auditbeat: fields: dataset: auditd_manager.file_integrity target: event + - add_fields: + fields: + stream_id: fim_2 + target: '@metadata' - add_fields: fields: id: agent-id @@ -117,6 +129,10 @@ auditbeat: fields: dataset: audit_system.socket target: event + - add_fields: + fields: + stream_id: id-auditd-system-socket + target: '@metadata' - add_fields: fields: id: agent-id @@ -143,6 +159,10 @@ auditbeat: fields: dataset: audit_system.process target: event + - add_fields: + fields: + stream_id: id-auditd-system-process + target: '@metadata' - add_fields: fields: id: agent-id diff --git a/internal/pkg/agent/program/testdata/logstash_config-metricbeat.yml b/internal/pkg/agent/program/testdata/logstash_config-metricbeat.yml index a2d429c8108..8b79bc1e060 100644 --- a/internal/pkg/agent/program/testdata/logstash_config-metricbeat.yml +++ b/internal/pkg/agent/program/testdata/logstash_config-metricbeat.yml @@ -58,6 +58,10 @@ metricbeat: - add_fields: fields: should_be: first + - add_fields: + target: "@metadata" + fields: + input_id: apache-metrics-id - add_fields: target: "data_stream" fields: diff --git a/internal/pkg/agent/program/testdata/namespace-metricbeat.yml b/internal/pkg/agent/program/testdata/namespace-metricbeat.yml index d0d4c24f058..46df931eb66 100644 --- a/internal/pkg/agent/program/testdata/namespace-metricbeat.yml +++ b/internal/pkg/agent/program/testdata/namespace-metricbeat.yml @@ -58,6 +58,10 @@ metricbeat: - add_fields: fields: should_be: first + - add_fields: + target: "@metadata" + fields: + input_id: apache-metrics-id - add_fields: target: "data_stream" fields: @@ -83,7 +87,7 @@ output: hosts: [127.0.0.1:9200, 127.0.0.1:9300] headers: h1: test-header - + namespace: test_namespace username: elastic password: changeme diff --git a/internal/pkg/agent/program/testdata/single_config-metricbeat.yml b/internal/pkg/agent/program/testdata/single_config-metricbeat.yml index a2c36b151f0..0580c5454eb 100644 --- a/internal/pkg/agent/program/testdata/single_config-metricbeat.yml +++ b/internal/pkg/agent/program/testdata/single_config-metricbeat.yml @@ -58,6 +58,10 @@ metricbeat: - add_fields: fields: should_be: first + - add_fields: + target: "@metadata" + fields: + input_id: apache-metrics-id - add_fields: target: "data_stream" fields: diff --git a/internal/pkg/agent/program/testdata/usecases/generated/namespace.metricbeat.golden.yml b/internal/pkg/agent/program/testdata/usecases/generated/namespace.metricbeat.golden.yml index 3232297227a..2e4f8833947 100644 --- a/internal/pkg/agent/program/testdata/usecases/generated/namespace.metricbeat.golden.yml +++ 
b/internal/pkg/agent/program/testdata/usecases/generated/namespace.metricbeat.golden.yml @@ -64,6 +64,10 @@ metricbeat: - add_fields: fields: should_be: first + - add_fields: + fields: + input_id: apache-metrics-id + target: '@metadata' - add_fields: fields: dataset: generic diff --git a/internal/pkg/agent/program/testdata/usecases/generated/single_config.filebeat.golden.yml b/internal/pkg/agent/program/testdata/usecases/generated/single_config.filebeat.golden.yml index 22055f9e09e..507efa09af9 100644 --- a/internal/pkg/agent/program/testdata/usecases/generated/single_config.filebeat.golden.yml +++ b/internal/pkg/agent/program/testdata/usecases/generated/single_config.filebeat.golden.yml @@ -6,6 +6,10 @@ filebeat: - /var/log/hello1.log - /var/log/hello2.log processors: + - add_fields: + fields: + input_id: logfile-1 + target: '@metadata' - add_fields: fields: dataset: generic @@ -35,6 +39,10 @@ filebeat: - /var/log/hello3.log - /var/log/hello4.log processors: + - add_fields: + fields: + input_id: logfile-2 + target: '@metadata' - add_fields: fields: dataset: generic diff --git a/internal/pkg/agent/program/testdata/usecases/generated/single_config.metricbeat.golden.yml b/internal/pkg/agent/program/testdata/usecases/generated/single_config.metricbeat.golden.yml index aca14055635..f51381d3394 100644 --- a/internal/pkg/agent/program/testdata/usecases/generated/single_config.metricbeat.golden.yml +++ b/internal/pkg/agent/program/testdata/usecases/generated/single_config.metricbeat.golden.yml @@ -64,6 +64,10 @@ metricbeat: - add_fields: fields: should_be: first + - add_fields: + fields: + input_id: apache-metrics-id + target: '@metadata' - add_fields: fields: dataset: generic diff --git a/internal/pkg/agent/transpiler/rules.go b/internal/pkg/agent/transpiler/rules.go index e4e466ddcd9..ca97cedd707 100644 --- a/internal/pkg/agent/transpiler/rules.go +++ b/internal/pkg/agent/transpiler/rules.go @@ -669,6 +669,42 @@ func (r *InjectStreamProcessorRule) Apply(_ AgentInfo, ast *AST) (err error) { namespace := datastreamNamespaceFromInputNode(inputNode) datastreamType := datastreamTypeFromInputNode(inputNode, r.Type) + var inputID *StrVal + inputIDNode, found := inputNode.Find("id") + if found { + inputID, _ = inputIDNode.Value().(*StrVal) + } + + if inputID != nil { + // get input-level processors node + processorsNode, found := inputNode.Find("processors") + if !found { + processorsNode = &Key{ + name: "processors", + value: &List{value: make([]Node, 0)}, + } + + inputMap, ok := inputNode.(*Dict) + if ok { + inputMap.value = append(inputMap.value, processorsNode) + } + } + + processorsList, ok := processorsNode.Value().(*List) + if !ok { + return errors.New("InjectStreamProcessorRule: input processors is not a list") + } + + // inject `input_id` on the input level + processorMap := &Dict{value: make([]Node, 0)} + processorMap.value = append(processorMap.value, &Key{name: "target", value: &StrVal{value: "@metadata"}}) + processorMap.value = append(processorMap.value, &Key{name: "fields", value: &Dict{value: []Node{ + &Key{name: "input_id", value: inputID}, + }}}) + addFieldsMap := &Dict{value: []Node{&Key{"add_fields", processorMap}}} + processorsList.value = mergeStrategy(r.OnConflict).InjectItem(processorsList.value, addFieldsMap) + } + streamsNode, ok := inputNode.Find("streams") if !ok { continue @@ -680,6 +716,12 @@ func (r *InjectStreamProcessorRule) Apply(_ AgentInfo, ast *AST) (err error) { } for _, streamNode := range streamsList.value { + var streamID *StrVal + streamIDNode, ok := 
streamNode.Find("id") + if ok { + streamID, _ = streamIDNode.Value().(*StrVal) + } + streamMap, ok := streamNode.(*Dict) if !ok { continue @@ -722,6 +764,17 @@ func (r *InjectStreamProcessorRule) Apply(_ AgentInfo, ast *AST) (err error) { }}}) addFieldsMap = &Dict{value: []Node{&Key{"add_fields", processorMap}}} processorsList.value = mergeStrategy(r.OnConflict).InjectItem(processorsList.value, addFieldsMap) + + if streamID != nil { + // source stream + processorMap = &Dict{value: make([]Node, 0)} + processorMap.value = append(processorMap.value, &Key{name: "target", value: &StrVal{value: "@metadata"}}) + processorMap.value = append(processorMap.value, &Key{name: "fields", value: &Dict{value: []Node{ + &Key{name: "stream_id", value: streamID.Clone()}, + }}}) + addFieldsMap = &Dict{value: []Node{&Key{"add_fields", processorMap}}} + processorsList.value = mergeStrategy(r.OnConflict).InjectItem(processorsList.value, addFieldsMap) + } } } diff --git a/internal/pkg/agent/transpiler/rules_test.go b/internal/pkg/agent/transpiler/rules_test.go index ab2df9c1bce..840e1442fde 100644 --- a/internal/pkg/agent/transpiler/rules_test.go +++ b/internal/pkg/agent/transpiler/rules_test.go @@ -165,6 +165,114 @@ inputs: }, }, + "inject stream": { + givenYAML: ` +inputs: + - name: No streams, no IDs + type: file + - name: With streams and IDs + id: input-id + type: file + data_stream.namespace: nsns + streams: + - paths: /var/log/mysql/error.log + id: stream-id + data_stream.dataset: dsds + - name: With processors + id: input-id + type: file + data_stream.namespace: nsns + processors: + - add_fields: + target: some + fields: + dataset: value + streams: + - paths: /var/log/mysql/error.log + id: stream-id + data_stream.dataset: dsds + processors: + - add_fields: + target: another + fields: + dataset: value +`, + expectedYAML: ` +inputs: + - name: No streams, no IDs + type: file + - name: With streams and IDs + id: input-id + type: file + data_stream.namespace: nsns + processors: + - add_fields: + target: "@metadata" + fields: + input_id: input-id + streams: + - paths: /var/log/mysql/error.log + id: stream-id + data_stream.dataset: dsds + processors: + - add_fields: + target: data_stream + fields: + type: stream-type + namespace: nsns + dataset: dsds + - add_fields: + target: event + fields: + dataset: dsds + - add_fields: + target: "@metadata" + fields: + stream_id: stream-id + - name: With processors + id: input-id + type: file + data_stream.namespace: nsns + processors: + - add_fields: + target: some + fields: + dataset: value + - add_fields: + target: "@metadata" + fields: + input_id: input-id + streams: + - paths: /var/log/mysql/error.log + id: stream-id + data_stream.dataset: dsds + processors: + - add_fields: + target: another + fields: + dataset: value + - add_fields: + target: data_stream + fields: + type: stream-type + namespace: nsns + dataset: dsds + - add_fields: + target: event + fields: + dataset: dsds + - add_fields: + target: "@metadata" + fields: + stream_id: stream-id +`, + rule: &RuleList{ + Rules: []Rule{ + InjectStreamProcessor("insert_after", "stream-type"), + }, + }, + }, + "inject agent info": { givenYAML: ` inputs: From b728dfe00b576b8f244bd571d6b5cc64bac8709a Mon Sep 17 00:00:00 2001 From: Michel Laterman <82832767+michel-laterman@users.noreply.github.com> Date: Fri, 17 Jun 2022 09:52:31 -0700 Subject: [PATCH 007/180] Add filemod times to contents of diagnostics collect command (#570) * Add filemod times to contents of diagnostics collect command Add filemod times to the files and 
directories in the zip archive. Log files (and sub dirs) will use the modtime returned by the fileinfo for the source. Others will use the timestamp from when the zip is created. * Fix linter --- CHANGELOG.next.asciidoc | 1 + internal/pkg/agent/cmd/diagnostics.go | 136 ++++++++++++++++++++------ 2 files changed, 108 insertions(+), 29 deletions(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index c911f1bedaf..24b4f09c44a 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -102,6 +102,7 @@ - Update API calls that the agent makes to Kibana when running the container command. {pull}253[253] - diagnostics collect log names are fixed on Windows machines, command will ignore failures. AgentID is included in diagnostics(and diagnostics collect) output. {issue}81[81] {issue}92[92] {issue}190[190] {pull}262[262] - Collects stdout and stderr of applications run as a process and logs them. {issue}[88] +- diagnostics collect file mod times are set. {pull}570[570] ==== New features diff --git a/internal/pkg/agent/cmd/diagnostics.go b/internal/pkg/agent/cmd/diagnostics.go index 9095e34bfc3..3f68689930b 100644 --- a/internal/pkg/agent/cmd/diagnostics.go +++ b/internal/pkg/agent/cmd/diagnostics.go @@ -97,10 +97,7 @@ func newDiagnosticsCollectCommandWithArgs(_ []string, streams *cli.IOStreams) *c } output, _ := c.Flags().GetString("output") - switch output { - case "yaml": - case "json": - default: + if _, ok := diagOutputs[output]; !ok { return fmt.Errorf("unsupported output: %s", output) } @@ -454,6 +451,7 @@ func gatherConfig() (AgentConfig, error) { // The passed DiagnosticsInfo and AgentConfig data is written in the specified output format. // Any local log files are collected and copied into the archive. func createZip(fileName, outputFormat string, diag DiagnosticsInfo, cfg AgentConfig, pprof map[string][]client.ProcPProf, metrics *proto.ProcMetricsResponse, errs []error) error { + ts := time.Now().UTC() f, err := os.Create(fileName) if err != nil { return err @@ -461,7 +459,11 @@ func createZip(fileName, outputFormat string, diag DiagnosticsInfo, cfg AgentCon zw := zip.NewWriter(f) if len(errs) > 0 { - zf, err := zw.Create("errors.txt") + zf, err := zw.CreateHeader(&zip.FileHeader{ + Name: "errors.txt", + Method: zip.Deflate, + Modified: ts, + }) if err != nil { return closeHandlers(err, zw, f) } @@ -470,12 +472,20 @@ func createZip(fileName, outputFormat string, diag DiagnosticsInfo, cfg AgentCon } } - _, err = zw.Create("meta/") + _, err = zw.CreateHeader(&zip.FileHeader{ + Name: "meta/", + Method: zip.Deflate, + Modified: ts, + }) if err != nil { return closeHandlers(err, zw, f) } - zf, err := zw.Create("meta/elastic-agent-version." + outputFormat) + zf, err := zw.CreateHeader(&zip.FileHeader{ + Name: "meta/elastic-agent-version" + outputFormat, + Method: zip.Deflate, + Modified: ts, + }) if err != nil { return closeHandlers(err, zw, f) } @@ -485,7 +495,11 @@ func createZip(fileName, outputFormat string, diag DiagnosticsInfo, cfg AgentCon } for _, m := range diag.ProcMeta { - zf, err = zw.Create("meta/" + m.Name + "-" + m.RouteKey + "." + outputFormat) + zf, err := zw.CreateHeader(&zip.FileHeader{ + Name: "meta/" + m.Name + "-" + m.RouteKey + "." 
+ outputFormat, + Method: zip.Deflate, + Modified: ts, + }) if err != nil { return closeHandlers(err, zw, f) } @@ -495,12 +509,20 @@ func createZip(fileName, outputFormat string, diag DiagnosticsInfo, cfg AgentCon } } - _, err = zw.Create("config/") + _, err = zw.CreateHeader(&zip.FileHeader{ + Name: "config/", + Method: zip.Deflate, + Modified: ts, + }) if err != nil { return closeHandlers(err, zw, f) } - zf, err = zw.Create("config/elastic-agent-local." + outputFormat) + zf, err = zw.CreateHeader(&zip.FileHeader{ + Name: "config/elastic-agent-local." + outputFormat, + Method: zip.Deflate, + Modified: ts, + }) if err != nil { return closeHandlers(err, zw, f) } @@ -508,7 +530,11 @@ func createZip(fileName, outputFormat string, diag DiagnosticsInfo, cfg AgentCon return closeHandlers(err, zw, f) } - zf, err = zw.Create("config/elastic-agent-policy." + outputFormat) + zf, err = zw.CreateHeader(&zip.FileHeader{ + Name: "config/elastic-agent-policy." + outputFormat, + Method: zip.Deflate, + Modified: ts, + }) if err != nil { return closeHandlers(err, zw, f) } @@ -516,7 +542,11 @@ func createZip(fileName, outputFormat string, diag DiagnosticsInfo, cfg AgentCon return closeHandlers(err, zw, f) } for name, appCfg := range cfg.AppConfig { - zf, err := zw.Create("config/" + name + "." + outputFormat) + zf, err := zw.CreateHeader(&zip.FileHeader{ + Name: "config/" + name + "." + outputFormat, + Method: zip.Deflate, + Modified: ts, + }) if err != nil { return closeHandlers(err, zw, f) } @@ -525,19 +555,19 @@ func createZip(fileName, outputFormat string, diag DiagnosticsInfo, cfg AgentCon } } - if err := zipLogs(zw); err != nil { + if err := zipLogs(zw, ts); err != nil { return closeHandlers(err, zw, f) } if pprof != nil { - err := zipProfs(zw, pprof) + err := zipProfs(zw, pprof, ts) if err != nil { return closeHandlers(err, zw, f) } } if metrics != nil && len(metrics.Result) > 0 { - err := zipMetrics(zw, metrics) + err := zipMetrics(zw, metrics, ts) if err != nil { return closeHandlers(err, zw, f) } @@ -547,8 +577,12 @@ func createZip(fileName, outputFormat string, diag DiagnosticsInfo, cfg AgentCon } // zipLogs walks paths.Logs() and copies the file structure into zw in "logs/" -func zipLogs(zw *zip.Writer) error { - _, err := zw.Create("logs/") +func zipLogs(zw *zip.Writer, ts time.Time) error { + _, err := zw.CreateHeader(&zip.FileHeader{ + Name: "logs/", + Method: zip.Deflate, + Modified: ts, + }) if err != nil { return err } @@ -575,7 +609,16 @@ func zipLogs(zw *zip.Writer) error { } if d.IsDir() { - _, err := zw.Create("logs/" + name + "/") + dirTS := ts + di, err := d.Info() + if err == nil { + dirTS = di.ModTime() + } + _, err = zw.CreateHeader(&zip.FileHeader{ + Name: "logs/" + name + "/", + Method: zip.Deflate, + Modified: dirTS, + }) if err != nil { return fmt.Errorf("unable to create log directory in archive: %w", err) } @@ -625,7 +668,15 @@ func saveLogs(name string, logPath string, zw *zip.Writer) error { if err != nil { return fmt.Errorf("unable to open log file: %w", err) } - zf, err := zw.Create("logs/" + name) + lfs, err := lf.Stat() + if err != nil { + return closeHandlers(fmt.Errorf("unable to stat log file: %w", err), lf) + } + zf, err := zw.CreateHeader(&zip.FileHeader{ + Name: "logs/" + name, + Method: zip.Deflate, + Modified: lfs.ModTime(), + }) if err != nil { return closeHandlers(fmt.Errorf("unable to create log file in archive: %w", err), lf) } @@ -681,20 +732,32 @@ func getAllPprof(ctx context.Context, d time.Duration) (map[string][]client.Proc return daemon.Pprof(ctx, d, 
pprofTypes, "", "") } -func zipProfs(zw *zip.Writer, pprof map[string][]client.ProcPProf) error { - _, err := zw.Create("pprof/") +func zipProfs(zw *zip.Writer, pprof map[string][]client.ProcPProf, ts time.Time) error { + _, err := zw.CreateHeader(&zip.FileHeader{ + Name: "pprof/", + Method: zip.Deflate, + Modified: ts, + }) if err != nil { return err } for pType, profs := range pprof { - _, err := zw.Create("pprof/" + pType + "/") + _, err = zw.CreateHeader(&zip.FileHeader{ + Name: "pprof/" + pType + "/", + Method: zip.Deflate, + Modified: ts, + }) if err != nil { return err } for _, p := range profs { if p.Error != "" { - zf, err := zw.Create("pprof/" + pType + "/" + p.Name + "_" + p.RouteKey + "_error.txt") + zf, err := zw.CreateHeader(&zip.FileHeader{ + Name: "pprof/" + pType + "/" + p.Name + "_" + p.RouteKey + "_error.txt", + Method: zip.Deflate, + Modified: ts, + }) if err != nil { return err } @@ -704,7 +767,11 @@ func zipProfs(zw *zip.Writer, pprof map[string][]client.ProcPProf) error { } continue } - zf, err := zw.Create("pprof/" + pType + "/" + p.Name + "_" + p.RouteKey + ".pprof") + zf, err := zw.CreateHeader(&zip.FileHeader{ + Name: "pprof/" + pType + "/" + p.Name + "_" + p.RouteKey + ".pprof", + Method: zip.Deflate, + Modified: ts, + }) if err != nil { return err } @@ -717,16 +784,23 @@ func zipProfs(zw *zip.Writer, pprof map[string][]client.ProcPProf) error { return nil } -func zipMetrics(zw *zip.Writer, metrics *proto.ProcMetricsResponse) error { - //nolint:staticcheck,wastedassign // false positive - zf, err := zw.Create("metrics/") +func zipMetrics(zw *zip.Writer, metrics *proto.ProcMetricsResponse, ts time.Time) error { + _, err := zw.CreateHeader(&zip.FileHeader{ + Name: "metrics/", + Method: zip.Deflate, + Modified: ts, + }) if err != nil { return err } for _, m := range metrics.Result { if m.Error != "" { - zf, err = zw.Create("metrics/" + m.AppName + "_" + m.RouteKey + "_error.txt") + zf, err := zw.CreateHeader(&zip.FileHeader{ + Name: "metrics/" + m.AppName + "_" + m.RouteKey + "_error.txt", + Method: zip.Deflate, + Modified: ts, + }) if err != nil { return err } @@ -736,7 +810,11 @@ func zipMetrics(zw *zip.Writer, metrics *proto.ProcMetricsResponse) error { } continue } - zf, err = zw.Create("metrics/" + m.AppName + "_" + m.RouteKey + ".json") + zf, err := zw.CreateHeader(&zip.FileHeader{ + Name: "metrics/" + m.AppName + "_" + m.RouteKey + ".json", + Method: zip.Deflate, + Modified: ts, + }) if err != nil { return err } From 7db7406cc04d6dd8f2d47f8a37e299da4598056b Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Mon, 20 Jun 2022 01:57:36 -0400 Subject: [PATCH 008/180] [Automation] Update elastic stack version to 8.4.0-b13123ee for testing (#581) Co-authored-by: apmmachine --- testing/environments/snapshot.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/testing/environments/snapshot.yml b/testing/environments/snapshot.yml index f067998ade9..728f4152900 100644 --- a/testing/environments/snapshot.yml +++ b/testing/environments/snapshot.yml @@ -3,7 +3,7 @@ version: '2.3' services: elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:8.4.0-da5a1c6d-SNAPSHOT + image: docker.elastic.co/elasticsearch/elasticsearch:8.4.0-b13123ee-SNAPSHOT # When extend is used it merges healthcheck.tests, see: # https://github.com/docker/compose/issues/8962 # healthcheck: @@ -42,7 +42,7 @@ services: - ./docker/logstash/pki:/etc/pki:ro kibana: - image: 
docker.elastic.co/kibana/kibana:8.4.0-da5a1c6d-SNAPSHOT + image: docker.elastic.co/kibana/kibana:8.4.0-b13123ee-SNAPSHOT environment: - "ELASTICSEARCH_USERNAME=kibana_system_user" - "ELASTICSEARCH_PASSWORD=testing" From d77a91e283b1f92e9d89837b30a20d2eb092341f Mon Sep 17 00:00:00 2001 From: Aleksandr Maus Date: Mon, 20 Jun 2022 15:50:55 -0400 Subject: [PATCH 009/180] Fix Agent upgrade 8.2->8.3 (#578) * Fix Agent upgrade 8.2->8.3 * Improve the upgrade encryption handling. Add .yml files cleanup. * Rollback ActionUpgrade to action_id, add MarkerActionUpgrade adapter struct for marker serialization compatibility --- .../agent/application/upgrade/step_mark.go | 77 +++++++++- .../pkg/agent/application/upgrade/upgrade.go | 76 +--------- internal/pkg/agent/cleaner/cleaner.go | 111 ++++++++++++++ internal/pkg/agent/cleaner/cleaner_test.go | 68 +++++++++ internal/pkg/agent/cmd/run.go | 135 ++++++++++++++++++ internal/pkg/fileutil/fileutil.go | 46 ++++++ 6 files changed, 434 insertions(+), 79 deletions(-) create mode 100644 internal/pkg/agent/cleaner/cleaner.go create mode 100644 internal/pkg/agent/cleaner/cleaner_test.go create mode 100644 internal/pkg/fileutil/fileutil.go diff --git a/internal/pkg/agent/application/upgrade/step_mark.go b/internal/pkg/agent/application/upgrade/step_mark.go index e176e4c5b96..66924337699 100644 --- a/internal/pkg/agent/application/upgrade/step_mark.go +++ b/internal/pkg/agent/application/upgrade/step_mark.go @@ -38,6 +38,58 @@ type UpdateMarker struct { Action *fleetapi.ActionUpgrade `json:"action" yaml:"action"` } +// MarkerActionUpgrade adapter struct compatible with pre 8.3 version of the marker file format +type MarkerActionUpgrade struct { + ActionID string `yaml:"id"` + ActionType string `yaml:"type"` + Version string `yaml:"version"` + SourceURI string `yaml:"source_uri,omitempty"` +} + +func convertToMarkerAction(a *fleetapi.ActionUpgrade) *MarkerActionUpgrade { + if a == nil { + return nil + } + return &MarkerActionUpgrade{ + ActionID: a.ActionID, + ActionType: a.ActionType, + Version: a.Version, + SourceURI: a.SourceURI, + } +} + +func convertToActionUpgrade(a *MarkerActionUpgrade) *fleetapi.ActionUpgrade { + if a == nil { + return nil + } + return &fleetapi.ActionUpgrade{ + ActionID: a.ActionID, + ActionType: a.ActionType, + Version: a.Version, + SourceURI: a.SourceURI, + } +} + +type updateMarkerSerializer struct { + Hash string `yaml:"hash"` + UpdatedOn time.Time `yaml:"updated_on"` + PrevVersion string `yaml:"prev_version"` + PrevHash string `yaml:"prev_hash"` + Acked bool `yaml:"acked"` + Action *MarkerActionUpgrade `yaml:"action"` +} + +func newMarkerSerializer(m *UpdateMarker) *updateMarkerSerializer { + return &updateMarkerSerializer{ + Hash: m.Hash, + UpdatedOn: m.UpdatedOn, + PrevVersion: m.PrevVersion, + PrevHash: m.PrevHash, + Acked: m.Acked, + Action: convertToMarkerAction(m.Action), + } +} + // markUpgrade marks update happened so we can handle grace period func (u *Upgrader) markUpgrade(_ context.Context, hash string, action Action) error { prevVersion := release.Version() @@ -46,7 +98,7 @@ func (u *Upgrader) markUpgrade(_ context.Context, hash string, action Action) er prevHash = prevHash[:hashLen] } - marker := UpdateMarker{ + marker := &UpdateMarker{ Hash: hash, UpdatedOn: time.Now(), PrevVersion: prevVersion, @@ -54,7 +106,7 @@ func (u *Upgrader) markUpgrade(_ context.Context, hash string, action Action) er Action: action.FleetAction(), } - markerBytes, err := yaml.Marshal(marker) + markerBytes, err := 
yaml.Marshal(newMarkerSerializer(marker)) if err != nil { return errors.New(err, errors.TypeConfig, "failed to parse marker file") } @@ -103,16 +155,31 @@ func LoadMarker() (*UpdateMarker, error) { return nil, err } - marker := &UpdateMarker{} + marker := &updateMarkerSerializer{} if err := yaml.Unmarshal(markerBytes, &marker); err != nil { return nil, err } - return marker, nil + return &UpdateMarker{ + Hash: marker.Hash, + UpdatedOn: marker.UpdatedOn, + PrevVersion: marker.PrevVersion, + PrevHash: marker.PrevHash, + Acked: marker.Acked, + Action: convertToActionUpgrade(marker.Action), + }, nil } func saveMarker(marker *UpdateMarker) error { - markerBytes, err := yaml.Marshal(marker) + makerSerializer := &updateMarkerSerializer{ + Hash: marker.Hash, + UpdatedOn: marker.UpdatedOn, + PrevVersion: marker.PrevVersion, + PrevHash: marker.PrevHash, + Acked: marker.Acked, + Action: convertToMarkerAction(marker.Action), + } + markerBytes, err := yaml.Marshal(makerSerializer) if err != nil { return err } diff --git a/internal/pkg/agent/application/upgrade/upgrade.go b/internal/pkg/agent/application/upgrade/upgrade.go index 81fb7a78444..9d67165d0eb 100644 --- a/internal/pkg/agent/application/upgrade/upgrade.go +++ b/internal/pkg/agent/application/upgrade/upgrade.go @@ -5,7 +5,6 @@ package upgrade import ( - "bytes" "context" "fmt" "io/ioutil" @@ -20,10 +19,8 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/agent/application/info" "github.com/elastic/elastic-agent/internal/pkg/agent/application/paths" "github.com/elastic/elastic-agent/internal/pkg/agent/application/reexec" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/secret" "github.com/elastic/elastic-agent/internal/pkg/agent/errors" "github.com/elastic/elastic-agent/internal/pkg/agent/program" - "github.com/elastic/elastic-agent/internal/pkg/agent/storage" "github.com/elastic/elastic-agent/internal/pkg/artifact" "github.com/elastic/elastic-agent/internal/pkg/capabilities" "github.com/elastic/elastic-agent/internal/pkg/core/state" @@ -173,10 +170,6 @@ func (u *Upgrader) Upgrade(ctx context.Context, a Action, reexecNow bool) (_ ree return nil, errors.New(err, "failed to copy action store") } - if err := encryptConfigIfNeeded(u.log, newHash); err != nil { - return nil, errors.New(err, "failed to encrypt the configuration") - } - if err := ChangeSymlink(ctx, newHash); err != nil { rollbackInstall(ctx, newHash) return nil, err @@ -220,6 +213,8 @@ func (u *Upgrader) Ack(ctx context.Context) error { return err } + marker.Acked = true + return saveMarker(marker) } @@ -335,73 +330,6 @@ func copyVault(newHash string) error { return nil } -// Create the key if it doesn't exist and encrypt the fleet.yml and state.yml -func encryptConfigIfNeeded(log *logger.Logger, newHash string) (err error) { - vaultPath := getVaultPath(newHash) - - err = secret.CreateAgentSecret(secret.WithVaultPath(vaultPath)) - if err != nil { - return err - } - - newHome := filepath.Join(filepath.Dir(paths.Home()), fmt.Sprintf("%s-%s", agentName, newHash)) - ymlStateStorePath := filepath.Join(newHome, filepath.Base(paths.AgentStateStoreYmlFile())) - stateStorePath := filepath.Join(newHome, filepath.Base(paths.AgentStateStoreFile())) - - files := []struct { - Src string - Dst string - }{ - { - Src: ymlStateStorePath, - Dst: stateStorePath, - }, - { - Src: paths.AgentConfigYmlFile(), - Dst: paths.AgentConfigFile(), - }, - } - for _, f := range files { - var b []byte - b, err = 
ioutil.ReadFile(f.Src) - if err != nil { - if os.IsNotExist(err) { - continue - } - return err - } - - // Encrypt yml file - store := storage.NewEncryptedDiskStore(f.Dst, storage.WithVaultPath(vaultPath)) - err = store.Save(bytes.NewReader(b)) - if err != nil { - return err - } - - // Remove yml file if no errors - defer func(fp string) { - if err != nil { - return - } - if rerr := os.Remove(fp); rerr != nil { - log.Warnf("failed to remove file: %s, err: %v", fp, rerr) - } - }(f.Src) - } - - // Do not remove AgentConfigYmlFile lock file if any error happened. - if err != nil { - return err - } - - lockFp := paths.AgentConfigYmlFile() + ".lock" - if rerr := os.Remove(lockFp); rerr != nil { - log.Warnf("failed to remove file: %s, err: %v", lockFp, rerr) - } - - return err -} - // shutdownCallback returns a callback function to be executing during shutdown once all processes are closed. // this goes through runtime directory of agent and copies all the state files created by processes to new versioned // home directory with updated process name to match new version. diff --git a/internal/pkg/agent/cleaner/cleaner.go b/internal/pkg/agent/cleaner/cleaner.go new file mode 100644 index 00000000000..856ae020b89 --- /dev/null +++ b/internal/pkg/agent/cleaner/cleaner.go @@ -0,0 +1,111 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package cleaner + +import ( + "context" + "os" + "sync" + "time" + + "github.com/elastic/elastic-agent-libs/logp" + "github.com/elastic/elastic-agent/internal/pkg/fileutil" +) + +// Wait interval. +// If the watchFile was not modified after this interval, then remove all the files in the removeFiles array +const defaultCleanWait = 15 * time.Minute + +type Cleaner struct { + log *logp.Logger + watchFile string + removeFiles []string + cleanWait time.Duration + + mx sync.Mutex +} + +type OptionFunc func(c *Cleaner) + +func New(log *logp.Logger, watchFile string, removeFiles []string, opts ...OptionFunc) *Cleaner { + c := &Cleaner{ + log: log, + watchFile: watchFile, + removeFiles: removeFiles, + cleanWait: defaultCleanWait, + } + + for _, opt := range opts { + opt(c) + } + return c +} + +func WithCleanWait(cleanWait time.Duration) OptionFunc { + return func(c *Cleaner) { + c.cleanWait = cleanWait + } +} + +func (c *Cleaner) Run(ctx context.Context) error { + wait, done, err := c.process() + if err != nil { + return err + } + + if done { + return nil + } + + t := time.NewTimer(wait) + defer t.Stop() + for { + select { + case <-ctx.Done(): + return nil + case <-t.C: + c.log.Debug("cleaner: timer triggered") + wait, done, err = c.process() + if err != nil { + return err + } + + if done { + return nil + } + t.Reset(wait) + } + } +} + +func (c *Cleaner) process() (wait time.Duration, done bool, err error) { + modTime, err := fileutil.GetModTime(c.watchFile) + if err != nil { + return + } + + c.log.Debugf("cleaner: check file %s mod time: %v", c.watchFile, modTime) + curDur := time.Since(modTime) + if curDur > c.cleanWait { + c.log.Debugf("cleaner: file %s modification expired", c.watchFile) + c.deleteFiles() + return wait, true, nil + } + wait = c.cleanWait - curDur + return wait, false, nil +} + +func (c *Cleaner) deleteFiles() { + c.log.Debugf("cleaner: delete files: %v", c.removeFiles) + c.mx.Lock() + defer c.mx.Unlock() + for _, fp := range c.removeFiles { + 
c.log.Debugf("cleaner: delete file: %v", fp) + err := os.Remove(fp) + if err != nil { + c.log.Warnf("cleaner: delete file %v failed: %v", fp, err) + } + } +} diff --git a/internal/pkg/agent/cleaner/cleaner_test.go b/internal/pkg/agent/cleaner/cleaner_test.go new file mode 100644 index 00000000000..cf189b784d3 --- /dev/null +++ b/internal/pkg/agent/cleaner/cleaner_test.go @@ -0,0 +1,68 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package cleaner + +import ( + "context" + "io/ioutil" + "os" + "path/filepath" + "testing" + "time" + + "github.com/elastic/elastic-agent-libs/logp" +) + +func TestCleaner(t *testing.T) { + // Setup + const watchFileName = "fleet.enc" + removeFiles := []string{"fleet.yml", "fleet.yml.lock"} + + ctx, cn := context.WithCancel(context.Background()) + defer cn() + + dir := t.TempDir() + watchFilePath := filepath.Join(dir, watchFileName) + + removeFilePaths := make([]string, len(removeFiles)) + + checkDir(t, dir, 0) + + // Create files + err := ioutil.WriteFile(watchFilePath, []byte{}, 0600) + if err != nil { + t.Fatal(err) + } + + for i, fn := range removeFiles { + removeFilePaths[i] = filepath.Join(dir, fn) + err := ioutil.WriteFile(removeFilePaths[i], []byte{}, 0600) + if err != nil { + t.Fatal(err) + } + } + + checkDir(t, dir, len(removeFiles)+1) + + log := logp.NewLogger("dynamic") + cleaner := New(log, watchFilePath, removeFilePaths, WithCleanWait(500*time.Millisecond)) + err = cleaner.Run(ctx) + if err != nil { + t.Fatal(err) + } + checkDir(t, dir, 1) +} + +func checkDir(t *testing.T, dir string, expectedCount int) { + t.Helper() + entries, err := os.ReadDir(dir) + if err != nil { + t.Fatal(err) + } + + if len(entries) != expectedCount { + t.Fatalf("Dir %s expected %d entries, found %d", dir, expectedCount, len(entries)) + } +} diff --git a/internal/pkg/agent/cmd/run.go b/internal/pkg/agent/cmd/run.go index b584baf2f09..723631b7960 100644 --- a/internal/pkg/agent/cmd/run.go +++ b/internal/pkg/agent/cmd/run.go @@ -5,6 +5,7 @@ package cmd import ( + "bytes" "context" "fmt" "io/ioutil" @@ -20,6 +21,7 @@ import ( "gopkg.in/yaml.v2" "github.com/elastic/elastic-agent-libs/api" + "github.com/elastic/elastic-agent-libs/logp" "github.com/elastic/elastic-agent-libs/monitoring" "github.com/elastic/elastic-agent-libs/service" "github.com/elastic/elastic-agent-system-metrics/report" @@ -31,6 +33,7 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/agent/application/reexec" "github.com/elastic/elastic-agent/internal/pkg/agent/application/secret" "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade" + "github.com/elastic/elastic-agent/internal/pkg/agent/cleaner" "github.com/elastic/elastic-agent/internal/pkg/agent/configuration" "github.com/elastic/elastic-agent/internal/pkg/agent/control/server" "github.com/elastic/elastic-agent/internal/pkg/agent/errors" @@ -41,6 +44,7 @@ import ( monitoringCfg "github.com/elastic/elastic-agent/internal/pkg/core/monitoring/config" monitoringServer "github.com/elastic/elastic-agent/internal/pkg/core/monitoring/server" "github.com/elastic/elastic-agent/internal/pkg/core/status" + "github.com/elastic/elastic-agent/internal/pkg/fileutil" 
"github.com/elastic/elastic-agent/internal/pkg/release" "github.com/elastic/elastic-agent/pkg/core/logger" "github.com/elastic/elastic-agent/version" @@ -126,6 +130,19 @@ func run(override cfgOverrider) error { return err } + // Check if the fleet.yml or state.yml exists and encrypt them. + // This is needed to handle upgrade properly. + // On agent upgrade the older version for example 8.2 unpacks the 8.3 agent + // and tries to run it. + // The new version of the agent requires encrypted configuration files or it will not start and upgrade will fail and revert. + err = encryptConfigIfNeeded(logger) + if err != nil { + return err + } + + // Start the old unencrypted agent configuration file cleaner + startOldAgentConfigCleaner(ctx, logger) + agentInfo, err := info.NewAgentInfoWithLog(defaultLogLevel(cfg), createAgentID) if err != nil { return errors.New(err, @@ -476,3 +493,121 @@ func initTracer(agentName, version string, mcfg *monitoringCfg.MonitoringConfig) Transport: ts, }) } + +// encryptConfigIfNeeded encrypts fleet.yml or state.yml if fleet.enc or state.enc does not exist already. +func encryptConfigIfNeeded(log *logger.Logger) (err error) { + log.Debug("encrypt config if needed") + + files := []struct { + Src string + Dst string + }{ + { + Src: paths.AgentStateStoreYmlFile(), + Dst: paths.AgentStateStoreFile(), + }, + { + Src: paths.AgentConfigYmlFile(), + Dst: paths.AgentConfigFile(), + }, + } + for _, f := range files { + var b []byte + + // Check if .yml file modification timestamp and existence + log.Debugf("check if the yml file %v exists", f.Src) + ymlModTime, ymlExists, err := fileutil.GetModTimeExists(f.Src) + if err != nil { + log.Errorf("failed to access yml file %v: %v", f.Src, err) + return err + } + + if !ymlExists { + log.Debugf("yml file %v doesn't exists, continue", f.Src) + continue + } + + // Check if .enc file modification timestamp and existence + log.Debugf("check if the enc file %v exists", f.Dst) + encModTime, encExists, err := fileutil.GetModTimeExists(f.Dst) + if err != nil { + log.Errorf("failed to access enc file %v: %v", f.Dst, err) + return err + } + + // If enc file exists and the yml file modification time is before enc file modification time then skip encryption. + // The reasoning is that the yml was not modified since the last time it was migrated to the encrypted file. 
+ // The modification of the yml is possible in the cases where the agent upgrade failed and rolled back, leaving .enc file on the disk for example + if encExists && ymlModTime.Before(encModTime) { + log.Debugf("enc file %v already exists, and the yml was not modified after migration, yml mod time: %v, enc mod time: %v", f.Dst, ymlModTime, encModTime) + continue + } + + log.Debugf("read file: %v", f.Src) + b, err = ioutil.ReadFile(f.Src) + if err != nil { + log.Debugf("read file: %v, err: %v", f.Src, err) + return err + } + + // Encrypt yml file + log.Debugf("encrypt file %v into %v", f.Src, f.Dst) + store := storage.NewEncryptedDiskStore(f.Dst) + err = store.Save(bytes.NewReader(b)) + if err != nil { + log.Debugf("failed to encrypt file: %v, err: %v", f.Dst, err) + return err + } + } + + if err != nil { + return err + } + + // Remove state.yml file if no errors + fp := paths.AgentStateStoreYmlFile() + // Check if state.yml exists + exists, err := fileutil.FileExists(fp) + if err != nil { + log.Warnf("failed to check if file %s exists, err: %v", fp, err) + } + if exists { + if err := os.Remove(fp); err != nil { + // Log only + log.Warnf("failed to remove file: %s, err: %v", fp, err) + } + } + + // The agent can't remove fleet.yml, because it can be rolled back by the older version of the agent "watcher" + // and pre 8.3 version needs unencrypted fleet.yml file in order to start. + // The fleet.yml file removal is performed by the cleaner on the agent start after the .enc configuration was stable for the grace period after upgrade + + return nil +} + +// startOldAgentConfigCleaner starts the cleaner that removes fleet.yml and fleet.yml.lock files after 15 mins by default +// The interval is calculated from the last modified time of fleet.enc. It's possible that the fleet.enc +// will be modified again during that time, the assumption is that at some point there will be 15 mins interval when the fleet.enc is not modified. +// The modification time is used because it's the most cross-patform compatible timestamp on the files. +// This is tied to grace period, default 10 mins, when the agent is considered "stable" after the upgrade. +// The old agent watcher doesn't know anything about configuration encryption so we have to delete the old configuration files here. +// The cleaner is only started if fleet.yml exists +func startOldAgentConfigCleaner(ctx context.Context, log *logp.Logger) { + // Start cleaner only when fleet.yml exists + fp := paths.AgentConfigYmlFile() + exists, err := fileutil.FileExists(fp) + if err != nil { + log.Warnf("failed to check if file %s exists, err: %v", fp, err) + } + if !exists { + return + } + + c := cleaner.New(log, paths.AgentConfigFile(), []string{fp, fmt.Sprintf("%s.lock", fp)}) + go func() { + err := c.Run(ctx) + if err != nil { + log.Warnf("failed running the old configuration files cleaner, err: %v", err) + } + }() +} diff --git a/internal/pkg/fileutil/fileutil.go b/internal/pkg/fileutil/fileutil.go new file mode 100644 index 00000000000..86d1db249aa --- /dev/null +++ b/internal/pkg/fileutil/fileutil.go @@ -0,0 +1,46 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
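+// Package fileutil provides small helpers for checking file existence and modification times.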
+ +package fileutil + +import ( + "errors" + "io/fs" + "os" + "time" +) + +// FileExists returns true if file/dir exists +func FileExists(fp string) (bool, error) { + _, err := os.Stat(fp) + if err != nil { + if errors.Is(err, fs.ErrNotExist) { + return false, nil + } + return false, err + } + return true, nil +} + +// GetModTime returns file modification time +func GetModTime(fp string) (time.Time, error) { + fi, err := os.Stat(fp) + if err != nil { + return time.Time{}, err + } + return fi.ModTime(), nil +} + +// GetModTimeExists returns file modification time and existence status +// Returns no error if the file doesn't exists +func GetModTimeExists(fp string) (time.Time, bool, error) { + modTime, err := GetModTime(fp) + if err != nil { + if errors.Is(err, fs.ErrNotExist) { + return modTime, false, nil + } + return modTime, false, err + } + return modTime, true, nil +} From e043f36d3c22938bfda20a9f791eac961599ce91 Mon Sep 17 00:00:00 2001 From: Michel Laterman <82832767+michel-laterman@users.noreply.github.com> Date: Tue, 21 Jun 2022 08:27:45 -0700 Subject: [PATCH 010/180] Update containerd (#577) --- NOTICE.txt | 12 ++++++------ go.mod | 5 +++-- go.sum | 13 ++++++++----- 3 files changed, 17 insertions(+), 13 deletions(-) diff --git a/NOTICE.txt b/NOTICE.txt index 4b402c790f4..e80beb85357 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -5274,11 +5274,11 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------------------- Dependency : golang.org/x/sys -Version: v0.0.0-20220405052023-b1e9470b6e64 +Version: v0.0.0-20220412211240-33da011f77ad Licence type (autodetected): BSD-3-Clause -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/golang.org/x/sys@v0.0.0-20220405052023-b1e9470b6e64/LICENSE: +Contents of probable licence file $GOMODCACHE/golang.org/x/sys@v0.0.0-20220412211240-33da011f77ad/LICENSE: Copyright (c) 2009 The Go Authors. All rights reserved. @@ -7110,11 +7110,11 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -------------------------------------------------------------------------------- Dependency : github.com/containerd/containerd -Version: v1.5.10 +Version: v1.5.13 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/containerd/containerd@v1.5.10/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/containerd/containerd@v1.5.13/LICENSE: Apache License @@ -15120,11 +15120,11 @@ THE SOFTWARE. 
-------------------------------------------------------------------------------- Dependency : go.uber.org/goleak -Version: v1.1.11 +Version: v1.1.12 Licence type (autodetected): MIT -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/go.uber.org/goleak@v1.1.11/LICENSE: +Contents of probable licence file $GOMODCACHE/go.uber.org/goleak@v1.1.12/LICENSE: The MIT License (MIT) diff --git a/go.mod b/go.mod index 37c11d9bfb6..5bc60abc07b 100644 --- a/go.mod +++ b/go.mod @@ -47,7 +47,7 @@ require ( golang.org/x/crypto v0.0.0-20210817164053-32db794688a5 golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 golang.org/x/sync v0.0.0-20210220032951-036812b2e83c - golang.org/x/sys v0.0.0-20220405052023-b1e9470b6e64 + golang.org/x/sys v0.0.0-20220412211240-33da011f77ad golang.org/x/tools v0.1.9 google.golang.org/grpc v1.42.0 google.golang.org/protobuf v1.27.1 @@ -65,7 +65,7 @@ require ( github.com/armon/go-radix v1.0.0 // indirect github.com/cavaliercoder/badio v0.0.0-20160213150051-ce5280129e9e // indirect github.com/cenkalti/backoff/v4 v4.1.1 // indirect - github.com/containerd/containerd v1.5.10 // indirect + github.com/containerd/containerd v1.5.13 // indirect github.com/cyphar/filepath-securejoin v0.2.3 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/dnephin/pflag v1.0.7 // indirect @@ -119,6 +119,7 @@ require ( go.elastic.co/apm/v2 v2.0.0 // indirect go.elastic.co/fastjson v1.1.0 // indirect go.uber.org/atomic v1.9.0 // indirect + go.uber.org/goleak v1.1.12 // indirect go.uber.org/multierr v1.8.0 // indirect golang.org/x/mod v0.5.1 // indirect golang.org/x/net v0.0.0-20220225172249-27dd8689420f // indirect diff --git a/go.sum b/go.sum index 91a2ec0da3a..7198e357fbf 100644 --- a/go.sum +++ b/go.sum @@ -92,7 +92,7 @@ github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2 github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00= github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600= github.com/Microsoft/hcsshim v0.8.21/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4= -github.com/Microsoft/hcsshim v0.8.23/go.mod h1:4zegtUJth7lAvFyc6cH2gGQ5B3OFQim01nnU2M8jKDg= +github.com/Microsoft/hcsshim v0.8.24/go.mod h1:4zegtUJth7lAvFyc6cH2gGQ5B3OFQim01nnU2M8jKDg= github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU= github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= @@ -229,6 +229,7 @@ github.com/containerd/cgroups v0.0.0-20200710171044-318312a37340/go.mod h1:s5q4S github.com/containerd/cgroups v0.0.0-20200824123100-0b889c03f102/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo= github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE= github.com/containerd/cgroups v1.0.1/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f25ZfBiPYRkU= +github.com/containerd/cgroups v1.0.3/go.mod h1:/ofk34relqNjSGyqPrmEULrO4Sc8LJhvJmWbUCUKqj8= github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod 
h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE= @@ -250,8 +251,8 @@ github.com/containerd/containerd v1.5.0-rc.0/go.mod h1:V/IXoMqNGgBlabz3tHD2TWDoT github.com/containerd/containerd v1.5.1/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTVn7dJnIOwtYR4g= github.com/containerd/containerd v1.5.2/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTVn7dJnIOwtYR4g= github.com/containerd/containerd v1.5.7/go.mod h1:gyvv6+ugqY25TiXxcZC3L5yOeYgEw0QMhscqVp1AR9c= -github.com/containerd/containerd v1.5.10 h1:3cQ2uRVCkJVcx5VombsE7105Gl9Wrl7ORAO3+4+ogf4= -github.com/containerd/containerd v1.5.10/go.mod h1:fvQqCfadDGga5HZyn3j4+dx56qj2I9YwBrlSdalvJYQ= +github.com/containerd/containerd v1.5.13 h1:XqvKw9i4P7/mFrC3TSM7yV5cwFZ9avXe6M3YANKnzEE= +github.com/containerd/containerd v1.5.13/go.mod h1:3AlCrzKROjIuP3JALsY14n8YtntaUDBu7vek+rPN5Vc= github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= @@ -1260,8 +1261,9 @@ go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= -go.uber.org/goleak v1.1.11 h1:wy28qYRKZgnJTxGxvye5/wgWr1EKjmUDGYox5mGlRlI= go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= +go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA= +go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= @@ -1562,8 +1564,9 @@ golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20211205182925-97ca703d548d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220405052023-b1e9470b6e64 h1:D1v9ucDTYBtbz5vNuBbAhIMAGhQhJ6Ym5ah3maMVNX4= golang.org/x/sys v0.0.0-20220405052023-b1e9470b6e64/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220412211240-33da011f77ad h1:ntjMns5wyP/fN65tdBD4g8J5w8n015+iIIs9rtjXkY0= +golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod 
h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= From 86ea452716a6299e3d877bb73c8a2d8a8deb5805 Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Wed, 22 Jun 2022 01:43:51 -0400 Subject: [PATCH 011/180] [Automation] Update elastic stack version to 8.4.0-4fe26f2a for testing (#591) Co-authored-by: apmmachine --- testing/environments/snapshot.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/testing/environments/snapshot.yml b/testing/environments/snapshot.yml index 728f4152900..f7f7fd7d996 100644 --- a/testing/environments/snapshot.yml +++ b/testing/environments/snapshot.yml @@ -3,7 +3,7 @@ version: '2.3' services: elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:8.4.0-b13123ee-SNAPSHOT + image: docker.elastic.co/elasticsearch/elasticsearch:8.4.0-4fe26f2a-SNAPSHOT # When extend is used it merges healthcheck.tests, see: # https://github.com/docker/compose/issues/8962 # healthcheck: @@ -42,7 +42,7 @@ services: - ./docker/logstash/pki:/etc/pki:ro kibana: - image: docker.elastic.co/kibana/kibana:8.4.0-b13123ee-SNAPSHOT + image: docker.elastic.co/kibana/kibana:8.4.0-4fe26f2a-SNAPSHOT environment: - "ELASTICSEARCH_USERNAME=kibana_system_user" - "ELASTICSEARCH_PASSWORD=testing" From c876ac4879fc901210b338d09b3b87a596bb7969 Mon Sep 17 00:00:00 2001 From: Aleksandr Maus Date: Wed, 22 Jun 2022 20:34:47 -0400 Subject: [PATCH 012/180] Set explicit ExitTimeOut for MacOS agent launchd plist (#594) * Set explicit ExitTimeOut for MacOS agent launchd plist --- internal/pkg/agent/install/svc.go | 63 ++++++++++++++++++++++++++++++- 1 file changed, 61 insertions(+), 2 deletions(-) diff --git a/internal/pkg/agent/install/svc.go b/internal/pkg/agent/install/svc.go index 7148f4acca0..1a3bf50c896 100644 --- a/internal/pkg/agent/install/svc.go +++ b/internal/pkg/agent/install/svc.go @@ -6,6 +6,7 @@ package install import ( "path/filepath" + "runtime" "github.com/kardianos/service" @@ -18,6 +19,12 @@ const ( // ServiceDescription is the description for the service. ServiceDescription = "Elastic Agent is a unified agent to observe, monitor and protect your system." + + // Set the launch daemon ExitTimeOut to 60 seconds in order to allow the agent to shut down gracefully. + // At the moment versions 8.3 & 8.4 of the agent take about 11 secs to shut down, + // and launchd sends SIGKILL after 5 secs, which causes the beats processes to be left running orphaned + // depending on the shutdown timing. + darwinServiceExitTimeout = 60 ) // ExecutablePath returns the path for the installed Agents executable. @@ -30,7 +37,7 @@ func ExecutablePath() string { } func newService() (service.Service, error) { - return service.New(nil, &service.Config{ + cfg := &service.Config{ Name: paths.ServiceName, DisplayName: ServiceDisplayName, Description: ServiceDescription, @@ -45,5 +52,57 @@ func newService() (service.Service, error) { "OnFailureDelayDuration": "1s", "OnFailureResetPeriod": 10, }, - }) + } + + if runtime.GOOS == "darwin" { + // The github.com/kardianos/service library doesn't support ExitTimeOut in its prebuilt template.
+ // This option allows us to pass our own template for the launch daemon plist, which is a copy + // of the prebuilt template with the added ExitTimeOut option + cfg.Option["LaunchdConfig"] = darwinLaunchdConfig + cfg.Option["ExitTimeOut"] = darwinServiceExitTimeout + } + + return service.New(nil, cfg) } + +// A copy of the launchd plist template from github.com/kardianos/service +// with added .Config.Option.ExitTimeOut option +const darwinLaunchdConfig = `<?xml version='1.0' encoding='UTF-8'?> +<!DOCTYPE plist PUBLIC \"-//Apple Computer//DTD PLIST 1.0//EN\" \"http://www.apple.com/DTDs/PropertyList-1.0.dtd\" > +<plist version='1.0'> +<dict> + <key>Label</key> + <string>{{html .Name}}</string> + <key>ProgramArguments</key> + <array> + <string>{{html .Path}}</string> + {{range .Config.Arguments}} + <string>{{html .}}</string> + {{end}} + </array> + {{if .UserName}}<key>UserName</key> + <string>{{html .UserName}}</string>{{end}} + {{if .ChRoot}}<key>RootDirectory</key> + <string>{{html .ChRoot}}</string>{{end}} + {{if .Config.Option.ExitTimeOut}}<key>ExitTimeOut</key> + <integer>{{html .Config.Option.ExitTimeOut}}</integer>{{end}} + {{if .WorkingDirectory}}<key>WorkingDirectory</key> + <string>{{html .WorkingDirectory}}</string>{{end}} + <key>SessionCreate</key> + <{{bool .SessionCreate}}/> + <key>KeepAlive</key> + <{{bool .KeepAlive}}/> + <key>RunAtLoad</key> + <{{bool .RunAtLoad}}/> + <key>Disabled</key> + <false/> + + <key>StandardOutPath</key> + <string>/usr/local/var/log/{{html .Name}}.out.log</string> + <key>StandardErrorPath</key> + <string>/usr/local/var/log/{{html .Name}}.err.log</string> + +</dict> +</plist> +` From 89802c6867264a71876a43058081a4aad5ba9b55 Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Thu, 23 Jun 2022 02:00:47 -0400 Subject: [PATCH 013/180] [Automation] Update elastic stack version to 8.4.0-2e32a640 for testing (#599) Co-authored-by: apmmachine --- testing/environments/snapshot.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/testing/environments/snapshot.yml b/testing/environments/snapshot.yml index f7f7fd7d996..1f7885d2eb6 100644 --- a/testing/environments/snapshot.yml +++ b/testing/environments/snapshot.yml @@ -3,7 +3,7 @@ version: '2.3' services: elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:8.4.0-4fe26f2a-SNAPSHOT + image: docker.elastic.co/elasticsearch/elasticsearch:8.4.0-2e32a640-SNAPSHOT # When extend is used it merges healthcheck.tests, see: # https://github.com/docker/compose/issues/8962 # healthcheck: @@ -42,7 +42,7 @@ services: - ./docker/logstash/pki:/etc/pki:ro kibana: - image: docker.elastic.co/kibana/kibana:8.4.0-4fe26f2a-SNAPSHOT + image: docker.elastic.co/kibana/kibana:8.4.0-2e32a640-SNAPSHOT environment: - "ELASTICSEARCH_USERNAME=kibana_system_user" - "ELASTICSEARCH_PASSWORD=testing" From 11ce21478f207439e109246510cb7c4d9ae2b00c Mon Sep 17 00:00:00 2001 From: Victor Martinez Date: Thu, 23 Jun 2022 09:49:20 +0100 Subject: [PATCH 014/180] ci: enable build notifications as GitHub issues (#595) --- .ci/Jenkinsfile | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.ci/Jenkinsfile b/.ci/Jenkinsfile index 10b0b97a572..68b3c8a7b51 100644 --- a/.ci/Jenkinsfile +++ b/.ci/Jenkinsfile @@ -262,7 +262,9 @@ pipeline { } post { cleanup { - notifyBuildResult(prComment: true) + notifyBuildResult(prComment: true, + githubIssue: isBranch() && currentBuild.currentResult != "SUCCESS", + githubLabels: 'Team:Elastic-Agent-Control-Plane') } } } From 09d924f7a7ec48fbca1dc6c6edf8ec8dbc9d51f0 Mon Sep 17 00:00:00 2001 From: Michel Laterman <82832767+michel-laterman@users.noreply.github.com> Date: Thu, 23 Jun 2022 10:07:29 -0700 Subject: [PATCH 015/180] status identifies failing component, fleet gateway may report degraded, liveness endpoint added (#569) * Add liveness endpoint Add /liveness route to metrics server. This route will report the status from pkg/core/status. fleet-gateway will now report a degraded state if a checkin fails.
This may not propagate to fleet-server as a failed checkin means communications between the agent and the server are not working. It may also lead to the server reporting degraded for up to 30s (fleet-server polling time) when the agent is able to successfully connect. * linter fix * add nolint directive * Linter fix * Review feedback, add doc strings * Rename noop controller file to _test file --- CHANGELOG.next.asciidoc | 1 + .../gateway/fleet/fleet_gateway.go | 46 ++++--- .../gateway/fleet/fleet_gateway_test.go | 130 +++++++++++------- ...ller.go => noop_status_controller_test.go} | 14 +- internal/pkg/agent/cmd/run.go | 6 +- internal/pkg/core/monitoring/server/server.go | 6 +- internal/pkg/core/status/handler.go | 39 ++++++ internal/pkg/core/status/reporter.go | 32 ++++- internal/pkg/core/status/reporter_test.go | 53 +++++++ internal/pkg/testutils/status_reporter.go | 67 +++++++++ 10 files changed, 319 insertions(+), 75 deletions(-) rename internal/pkg/agent/application/gateway/fleet/{noop_status_controller.go => noop_status_controller_test.go} (77%) create mode 100644 internal/pkg/core/status/handler.go create mode 100644 internal/pkg/testutils/status_reporter.go diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 24b4f09c44a..cbe894d63c5 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -185,3 +185,4 @@ - Bump node.js version for heartbeat/synthetics to 16.15.0 - Support scheduled actions and cancellation of pending actions. {issue}393[393] {pull}419[419] - Add `@metadata.input_id` and `@metadata.stream_id` when applying the inject stream processor {pull}527[527] +- Add liveness endpoint, allow fleet-gateway component to report degraded state, add update time and messages to status output. {issue}390[390] {pull}569[569] diff --git a/internal/pkg/agent/application/gateway/fleet/fleet_gateway.go b/internal/pkg/agent/application/gateway/fleet/fleet_gateway.go index 4ff4c34ad42..f5c02d3356a 100644 --- a/internal/pkg/agent/application/gateway/fleet/fleet_gateway.go +++ b/internal/pkg/agent/application/gateway/fleet/fleet_gateway.go @@ -2,6 +2,8 @@ // or more contributor license agreements. Licensed under the Elastic License; // you may not use this file except in compliance with the Elastic License. +// Package fleet handles interactions between the elastic-agent and fleet-server. +// Specifically it will handle agent checkins and action queueing/dispatch.
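+// The gateway also tracks consecutive checkin failures and, from the second consecutive failure onward, reports its status reporter as degraded (see the checkinFailCounter handling in doExecute below).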
package fleet import ( @@ -75,23 +77,24 @@ type actionQueue interface { } type fleetGateway struct { - bgContext context.Context - log *logger.Logger - dispatcher pipeline.Dispatcher - client client.Sender - scheduler scheduler.Scheduler - backoff backoff.Backoff - settings *fleetGatewaySettings - agentInfo agentInfo - reporter fleetReporter - done chan struct{} - wg sync.WaitGroup - acker store.FleetAcker - unauthCounter int - statusController status.Controller - statusReporter status.Reporter - stateStore stateStore - queue actionQueue + bgContext context.Context + log *logger.Logger + dispatcher pipeline.Dispatcher + client client.Sender + scheduler scheduler.Scheduler + backoff backoff.Backoff + settings *fleetGatewaySettings + agentInfo agentInfo + reporter fleetReporter + done chan struct{} + wg sync.WaitGroup + acker store.FleetAcker + unauthCounter int + checkinFailCounter int + statusController status.Controller + statusReporter status.Reporter + stateStore stateStore + queue actionQueue } // New creates a new fleet gateway @@ -286,6 +289,7 @@ func (f *fleetGateway) doExecute() (*fleetapi.CheckinResponse, error) { f.log.Debugf("Checking started") resp, err := f.execute(f.bgContext) if err != nil { + f.checkinFailCounter++ f.log.Errorf("Could not communicate with fleet-server Checking API will retry, error: %s", err) if !f.backoff.Wait() { // Something bad has happened and we log it and we should update our current state. @@ -299,8 +303,16 @@ func (f *fleetGateway) doExecute() (*fleetapi.CheckinResponse, error) { f.statusReporter.Update(state.Failed, err.Error(), nil) return nil, err } + if f.checkinFailCounter > 1 { + // Update status reporter for gateway to degraded when there are two consecutive failures. + // Note that this may not propagate to fleet-server as the agent is having issues checking in. + // It may also (falsely) report a degraded session for 30s if it is eventually successful. + // However this component will allow the agent to report fleet gateway degradation locally. + f.statusReporter.Update(state.Degraded, fmt.Sprintf("checkin failed: %v", err), nil) + } continue } + f.checkinFailCounter = 0 // Request was successful, return the collected actions.
return resp, nil } diff --git a/internal/pkg/agent/application/gateway/fleet/fleet_gateway_test.go b/internal/pkg/agent/application/gateway/fleet/fleet_gateway_test.go index a9b9380519f..6ce62448276 100644 --- a/internal/pkg/agent/application/gateway/fleet/fleet_gateway_test.go +++ b/internal/pkg/agent/application/gateway/fleet/fleet_gateway_test.go @@ -26,12 +26,14 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/agent/application/paths" "github.com/elastic/elastic-agent/internal/pkg/agent/storage" "github.com/elastic/elastic-agent/internal/pkg/agent/storage/store" + "github.com/elastic/elastic-agent/internal/pkg/core/state" "github.com/elastic/elastic-agent/internal/pkg/fleetapi" noopacker "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker/noop" repo "github.com/elastic/elastic-agent/internal/pkg/reporter" fleetreporter "github.com/elastic/elastic-agent/internal/pkg/reporter/fleet" fleetreporterConfig "github.com/elastic/elastic-agent/internal/pkg/reporter/fleet/config" "github.com/elastic/elastic-agent/internal/pkg/scheduler" + "github.com/elastic/elastic-agent/internal/pkg/testutils" "github.com/elastic/elastic-agent/pkg/core/logger" ) @@ -705,59 +707,95 @@ func TestRetriesOnFailures(t *testing.T) { Backoff: backoffSettings{Init: 100 * time.Millisecond, Max: 5 * time.Second}, } - t.Run("When the gateway fails to communicate with the checkin API we will retry", - withGateway(agentInfo, settings, func( - t *testing.T, - gateway gateway.FleetGateway, - client *testingClient, - dispatcher *testingDispatcher, - scheduler *scheduler.Stepper, - rep repo.Backend, - ) { - fail := func(_ http.Header, _ io.Reader) (*http.Response, error) { - return wrapStrToResp(http.StatusInternalServerError, "something is bad"), nil - } - clientWaitFn := client.Answer(fail) - err := gateway.Start() - require.NoError(t, err) + t.Run("When the gateway fails to communicate with the checkin API we will retry", func(t *testing.T) { + scheduler := scheduler.NewStepper() + client := newTestingClient() + dispatcher := newTestingDispatcher() + log, _ := logger.New("fleet_gateway", false) + rep := getReporter(agentInfo, log, t) - _ = rep.Report(context.Background(), &testStateEvent{}) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - // Initial tick is done out of bound so we can block on channels. - scheduler.Next() + diskStore := storage.NewDiskStore(paths.AgentStateStoreFile()) + stateStore, err := store.NewStateStore(log, diskStore) + require.NoError(t, err) - // Simulate a 500 errors for the next 3 calls. 
- <-clientWaitFn - <-clientWaitFn - <-clientWaitFn - - // API recover - waitFn := ackSeq( - client.Answer(func(_ http.Header, body io.Reader) (*http.Response, error) { - cr := &request{} - content, err := ioutil.ReadAll(body) - if err != nil { - t.Fatal(err) - } - err = json.Unmarshal(content, &cr) - if err != nil { - t.Fatal(err) - } + queue := &mockQueue{} + queue.On("DequeueActions").Return([]fleetapi.Action{}) + queue.On("Actions").Return([]fleetapi.Action{}) + + fleetReporter := &testutils.MockReporter{} + fleetReporter.On("Update", state.Degraded, mock.Anything, mock.Anything).Times(2) + fleetReporter.On("Update", mock.Anything, mock.Anything, mock.Anything).Maybe() + fleetReporter.On("Unregister").Maybe() + + statusController := &testutils.MockController{} + statusController.On("RegisterComponent", "gateway").Return(fleetReporter).Once() + statusController.On("StatusString").Return("string") + + gateway, err := newFleetGatewayWithScheduler( + ctx, + log, + settings, + agentInfo, + client, + dispatcher, + scheduler, + rep, + noopacker.NewAcker(), + statusController, + stateStore, + queue, + ) + require.NoError(t, err) + + fail := func(_ http.Header, _ io.Reader) (*http.Response, error) { + return wrapStrToResp(http.StatusInternalServerError, "something is bad"), nil + } + clientWaitFn := client.Answer(fail) + err = gateway.Start() + require.NoError(t, err) - require.Equal(t, 1, len(cr.Events)) + _ = rep.Report(context.Background(), &testStateEvent{}) - resp := wrapStrToResp(http.StatusOK, `{ "actions": [] }`) - return resp, nil - }), + // Initial tick is done out of bound so we can block on channels. + scheduler.Next() - dispatcher.Answer(func(actions ...fleetapi.Action) error { - require.Equal(t, 0, len(actions)) - return nil - }), - ) + // Simulate a 500 errors for the next 3 calls. 
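+ // Each receive below unblocks once the stubbed client has served one failing checkin.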
+ <-clientWaitFn + <-clientWaitFn + <-clientWaitFn - waitFn() - })) + // API recover + waitFn := ackSeq( + client.Answer(func(_ http.Header, body io.Reader) (*http.Response, error) { + cr := &request{} + content, err := ioutil.ReadAll(body) + if err != nil { + t.Fatal(err) + } + err = json.Unmarshal(content, &cr) + if err != nil { + t.Fatal(err) + } + + require.Equal(t, 1, len(cr.Events)) + + resp := wrapStrToResp(http.StatusOK, `{ "actions": [] }`) + return resp, nil + }), + + dispatcher.Answer(func(actions ...fleetapi.Action) error { + require.Equal(t, 0, len(actions)) + return nil + }), + ) + + waitFn() + statusController.AssertExpectations(t) + fleetReporter.AssertExpectations(t) + }) t.Run("The retry loop is interruptible", withGateway(agentInfo, &fleetGatewaySettings{ diff --git a/internal/pkg/agent/application/gateway/fleet/noop_status_controller.go b/internal/pkg/agent/application/gateway/fleet/noop_status_controller_test.go similarity index 77% rename from internal/pkg/agent/application/gateway/fleet/noop_status_controller.go rename to internal/pkg/agent/application/gateway/fleet/noop_status_controller_test.go index d5097655a63..bbae6958ab6 100644 --- a/internal/pkg/agent/application/gateway/fleet/noop_status_controller.go +++ b/internal/pkg/agent/application/gateway/fleet/noop_status_controller_test.go @@ -5,21 +5,25 @@ package fleet import ( + "net/http" + "github.com/elastic/elastic-agent/internal/pkg/core/state" "github.com/elastic/elastic-agent/internal/pkg/core/status" ) type noopController struct{} +func (*noopController) SetAgentID(_ string) {} func (*noopController) RegisterComponent(_ string) status.Reporter { return &noopReporter{} } func (*noopController) RegisterComponentWithPersistance(_ string, _ bool) status.Reporter { return &noopReporter{} } -func (*noopController) RegisterApp(_ string, _ string) status.Reporter { return &noopReporter{} } -func (*noopController) Status() status.AgentStatus { return status.AgentStatus{Status: status.Healthy} } -func (*noopController) StatusCode() status.AgentStatusCode { return status.Healthy } -func (*noopController) UpdateStateID(_ string) {} -func (*noopController) StatusString() string { return "online" } +func (*noopController) RegisterApp(_ string, _ string) status.Reporter { return &noopReporter{} } +func (*noopController) Status() status.AgentStatus { return status.AgentStatus{Status: status.Healthy} } +func (*noopController) StatusCode() status.AgentStatusCode { return status.Healthy } +func (*noopController) UpdateStateID(_ string) {} +func (*noopController) StatusString() string { return "online" } +func (*noopController) ServeHTTP(_ http.ResponseWriter, _ *http.Request) {} type noopReporter struct{} diff --git a/internal/pkg/agent/cmd/run.go b/internal/pkg/agent/cmd/run.go index 723631b7960..ad508af9086 100644 --- a/internal/pkg/agent/cmd/run.go +++ b/internal/pkg/agent/cmd/run.go @@ -169,6 +169,7 @@ func run(override cfgOverrider) error { rex := reexec.NewManager(rexLogger, execPath) statusCtrl := status.NewController(logger) + statusCtrl.SetAgentID(agentInfo.AgentID()) tracer, err := initTracer(agentName, release.Version(), cfg.Settings.MonitoringConfig) if err != nil { @@ -199,7 +200,7 @@ func run(override cfgOverrider) error { control.SetRouteFn(app.Routes) control.SetMonitoringCfg(cfg.Settings.MonitoringConfig) - serverStopFn, err := setupMetrics(agentInfo, logger, cfg.Settings.DownloadConfig.OS(), cfg.Settings.MonitoringConfig, app, tracer) + serverStopFn, err := setupMetrics(agentInfo, logger, 
cfg.Settings.DownloadConfig.OS(), cfg.Settings.MonitoringConfig, app, tracer) + serverStopFn, err := setupMetrics(agentInfo, logger, cfg.Settings.DownloadConfig.OS(), cfg.Settings.MonitoringConfig, app, tracer, statusCtrl) if err != nil { return err } @@ -354,6 +355,7 @@ func setupMetrics( cfg *monitoringCfg.MonitoringConfig, app application.Application, tracer *apm.Tracer, + statusCtrl status.Controller, ) (func() error, error) { if err := report.SetupMetrics(logger, agentName, version.GetDefaultVersion()); err != nil { return nil, err @@ -366,7 +368,7 @@ func setupMetrics( } bufferEnabled := cfg.HTTP.Buffer != nil && cfg.HTTP.Buffer.Enabled - s, err := monitoringServer.New(logger, endpointConfig, monitoring.GetNamespace, app.Routes, isProcessStatsEnabled(cfg.HTTP), bufferEnabled, tracer) + s, err := monitoringServer.New(logger, endpointConfig, monitoring.GetNamespace, app.Routes, isProcessStatsEnabled(cfg.HTTP), bufferEnabled, tracer, statusCtrl) if err != nil { return nil, errors.New(err, "could not start the HTTP server for the API") } diff --git a/internal/pkg/core/monitoring/server/server.go b/internal/pkg/core/monitoring/server/server.go index e5929909158..e0289ae72d0 100644 --- a/internal/pkg/core/monitoring/server/server.go +++ b/internal/pkg/core/monitoring/server/server.go @@ -33,6 +33,7 @@ func New( enableProcessStats bool, enableBuffer bool, tracer *apm.Tracer, + statusController http.Handler, ) (*api.Server, error) { if err := createAgentMonitoringDrop(endpointConfig.Host); err != nil { // log but ignore @@ -44,7 +45,7 @@ func New( return nil, err } - return exposeMetricsEndpoint(log, cfg, ns, routesFetchFn, enableProcessStats, enableBuffer, tracer) + return exposeMetricsEndpoint(log, cfg, ns, routesFetchFn, enableProcessStats, enableBuffer, tracer, statusController) } func exposeMetricsEndpoint( @@ -55,6 +56,7 @@ func exposeMetricsEndpoint( enableProcessStats bool, enableBuffer bool, tracer *apm.Tracer, + statusController http.Handler, ) (*api.Server, error) { r := mux.NewRouter() if tracer != nil { @@ -63,6 +65,8 @@ func exposeMetricsEndpoint( statsHandler := statsHandler(ns("stats")) r.Handle("/stats", createHandler(statsHandler)) + r.Handle("/liveness", statusController) + if enableProcessStats { r.HandleFunc("/processes", processesHandler(routesFetchFn)) r.Handle("/processes/{processID}", createHandler(processHandler(statsHandler))) diff --git a/internal/pkg/core/status/handler.go b/internal/pkg/core/status/handler.go new file mode 100644 index 00000000000..2e7476901c5 --- /dev/null +++ b/internal/pkg/core/status/handler.go @@ -0,0 +1,39 @@ +package status + +import ( + "encoding/json" + "net/http" + "time" +) + +// LivenessResponse is the response body for the liveness endpoint. +type LivenessResponse struct { + ID string `json:"id"` + Status string `json:"status"` + Message string `json:"message"` + UpdateTime time.Time `json:"update_timestamp"` +} + +// ServeHTTP is an HTTP Handler for the status controller. +// Response code is 200 for a healthy agent, and 503 otherwise. +// Response body is a JSON object that contains the agent ID, status, message, and the last status update time.
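+// An illustrative body for a healthy agent (placeholder values, not real output): +// {"id":"<agent-id>","status":"online","message":"","update_timestamp":"<RFC3339 timestamp>"}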
+func (r *controller) ServeHTTP(wr http.ResponseWriter, req *http.Request) { + s := r.Status() + lr := LivenessResponse{ + ID: r.agentID, + Status: s.Status.String(), + Message: s.Message, + UpdateTime: s.UpdateTime, + } + status := http.StatusOK + if s.Status != Healthy { + status = http.StatusServiceUnavailable + } + + wr.Header().Set("Content-Type", "application/json") + wr.WriteHeader(status) + enc := json.NewEncoder(wr) + if err := enc.Encode(lr); err != nil { + r.log.Errorf("Unable to encode liveness response: %v", err) + } +} diff --git a/internal/pkg/core/status/reporter.go b/internal/pkg/core/status/reporter.go index 04c8251fa92..92632af2ed5 100644 --- a/internal/pkg/core/status/reporter.go +++ b/internal/pkg/core/status/reporter.go @@ -2,11 +2,15 @@ // or more contributor license agreements. Licensed under the Elastic License; // you may not use this file except in compliance with the Elastic License. +// Package status handles process status reporting. package status import ( + "fmt" + "net/http" "reflect" "sync" + "time" "github.com/google/uuid" @@ -47,10 +51,12 @@ type AgentStatus struct { Status AgentStatusCode Message string Applications []AgentApplicationStatus + UpdateTime time.Time } // Controller takes track of component statuses. type Controller interface { + SetAgentID(string) RegisterComponent(string) Reporter RegisterComponentWithPersistance(string, bool) Reporter RegisterApp(id string, name string) Reporter @@ -58,15 +64,19 @@ type Controller interface { StatusCode() AgentStatusCode StatusString() string UpdateStateID(string) + ServeHTTP(http.ResponseWriter, *http.Request) } type controller struct { mx sync.Mutex status AgentStatusCode + message string + updateTime time.Time reporters map[string]*reporter appReporters map[string]*reporter log *logger.Logger stateID string + agentID string } // NewController creates a new reporter. @@ -79,6 +89,14 @@ func NewController(log *logger.Logger) Controller { } } +// SetAgentID sets the agentID of the controller +// The AgentID may be used in the handler output. +func (r *controller) SetAgentID(agentID string) { + r.mx.Lock() + defer r.mx.Unlock() + r.agentID = agentID +} + // UpdateStateID cleans health when new configuration is received. // To prevent reporting failures from previous configuration. 
func (r *controller) UpdateStateID(stateID string) { @@ -175,8 +193,9 @@ func (r *controller) Status() AgentStatus { } return AgentStatus{ Status: r.status, - Message: "", + Message: r.message, Applications: apps, + UpdateTime: r.updateTime, } } @@ -189,12 +208,14 @@ func (r *controller) StatusCode() AgentStatusCode { func (r *controller) updateStatus() { status := Healthy + message := "" r.mx.Lock() for id, rep := range r.reporters { s := statusToAgentStatus(rep.status) if s > status { status = s + message = fmt.Sprintf("component %s: %s", id, rep.message) } r.log.Debugf("'%s' has status '%s'", id, s) @@ -207,6 +228,7 @@ func (r *controller) updateStatus() { s := statusToAgentStatus(rep.status) if s > status { status = s + message = fmt.Sprintf("app %s: %s", id, rep.message) } r.log.Debugf("'%s' has status '%s'", id, s) @@ -217,15 +239,17 @@ func (r *controller) updateStatus() { } if r.status != status { - r.logStatus(status) + r.logStatus(status, message) r.status = status + r.message = message + r.updateTime = time.Now().UTC() } r.mx.Unlock() } -func (r *controller) logStatus(status AgentStatusCode) { +func (r *controller) logStatus(status AgentStatusCode, message string) { logFn := r.log.Infof if status == Degraded { logFn = r.log.Warnf @@ -233,7 +257,7 @@ func (r *controller) logStatus(status AgentStatusCode) { logFn = r.log.Errorf } - logFn("Elastic Agent status changed to: '%s'", status) + logFn("Elastic Agent status changed to %q: %q", status, message) } // StatusString retrieves human readable string of current agent status. diff --git a/internal/pkg/core/status/reporter_test.go b/internal/pkg/core/status/reporter_test.go index 0d44e402798..09a66661fc5 100644 --- a/internal/pkg/core/status/reporter_test.go +++ b/internal/pkg/core/status/reporter_test.go @@ -6,6 +6,7 @@ package status import ( "testing" + "time" "github.com/stretchr/testify/assert" @@ -98,4 +99,56 @@ func TestReporter(t *testing.T) { assert.Equal(t, Degraded, r.StatusCode()) assert.Equal(t, "degraded", r.StatusString()) }) + + t.Run("Check agent status components healthy", func(t *testing.T) { + r := NewController(l) + r1 := r.RegisterComponent("r1") + r2 := r.RegisterComponent("r2") + r3 := r.RegisterComponent("r3") + + r1.Update(state.Healthy, "", nil) + r2.Update(state.Healthy, "", nil) + r3.Update(state.Healthy, "", nil) + + s := r.Status() + assert.Equal(t, Healthy, s.Status) + assert.Equal(t, "", s.Message) + assert.Equal(t, time.Time{}, s.UpdateTime) + }) + + //nolint:dupl // test case + t.Run("Check agent status one component degraded", func(t *testing.T) { + r := NewController(l) + r1 := r.RegisterComponent("r1") + r2 := r.RegisterComponent("r2") + r3 := r.RegisterComponent("r3") + + r1.Update(state.Healthy, "", nil) + r2.Update(state.Degraded, "degraded", nil) + r3.Update(state.Healthy, "", nil) + + s := r.Status() + assert.Equal(t, Degraded, s.Status) + assert.Contains(t, s.Message, "component r2") + assert.Contains(t, s.Message, "degraded") + assert.NotEqual(t, time.Time{}, s.UpdateTime) + }) + + //nolint:dupl // test case + t.Run("Check agent status one component failed", func(t *testing.T) { + r := NewController(l) + r1 := r.RegisterComponent("r1") + r2 := r.RegisterComponent("r2") + r3 := r.RegisterComponent("r3") + + r1.Update(state.Healthy, "", nil) + r2.Update(state.Failed, "failed", nil) + r3.Update(state.Degraded, "degraded", nil) + + s := r.Status() + assert.Equal(t, Failed, s.Status) + assert.Contains(t, s.Message, "component r2") + assert.Contains(t, s.Message, "failed") + 
assert.NotEqual(t, time.Time{}, s.UpdateTime) + }) } diff --git a/internal/pkg/testutils/status_reporter.go b/internal/pkg/testutils/status_reporter.go new file mode 100644 index 00000000000..a045e50304a --- /dev/null +++ b/internal/pkg/testutils/status_reporter.go @@ -0,0 +1,67 @@ +package testutils + +import ( + "net/http" + + "github.com/elastic/elastic-agent/internal/pkg/core/state" + "github.com/elastic/elastic-agent/internal/pkg/core/status" + "github.com/stretchr/testify/mock" +) + +type MockController struct { + mock.Mock +} + +func (m *MockController) SetAgentID(id string) { + m.Called(id) +} + +func (m *MockController) RegisterComponent(id string) status.Reporter { + args := m.Called(id) + return args.Get(0).(status.Reporter) +} + +func (m *MockController) RegisterComponentWithPersistance(id string, b bool) status.Reporter { + args := m.Called(id, b) + return args.Get(0).(status.Reporter) +} + +func (m *MockController) RegisterApp(id, name string) status.Reporter { + args := m.Called(id, name) + return args.Get(0).(status.Reporter) +} + +func (m *MockController) Status() status.AgentStatus { + args := m.Called() + return args.Get(0).(status.AgentStatus) +} + +func (m *MockController) StatusCode() status.AgentStatusCode { + args := m.Called() + return args.Get(0).(status.AgentStatusCode) +} + +func (m *MockController) StatusString() string { + args := m.Called() + return args.String(0) +} + +func (m *MockController) UpdateStateID(id string) { + m.Called(id) +} + +func (m *MockController) ServeHTTP(wr http.ResponseWriter, req *http.Request) { + m.Called(wr, req) +} + +type MockReporter struct { + mock.Mock +} + +func (m *MockReporter) Update(state state.Status, message string, meta map[string]interface{}) { + m.Called(state, message, meta) +} + +func (m *MockReporter) Unregister() { + m.Called() +} From daa5c5f4ad194f9d8226adcf084373e3fd582418 Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Fri, 24 Jun 2022 01:44:50 -0400 Subject: [PATCH 016/180] [Automation] Update elastic stack version to 8.4.0-722a7d79 for testing (#607) Co-authored-by: apmmachine --- testing/environments/snapshot.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/testing/environments/snapshot.yml b/testing/environments/snapshot.yml index 1f7885d2eb6..8be3d1e8d1b 100644 --- a/testing/environments/snapshot.yml +++ b/testing/environments/snapshot.yml @@ -3,7 +3,7 @@ version: '2.3' services: elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:8.4.0-2e32a640-SNAPSHOT + image: docker.elastic.co/elasticsearch/elasticsearch:8.4.0-722a7d79-SNAPSHOT # When extend is used it merges healthcheck.tests, see: # https://github.com/docker/compose/issues/8962 # healthcheck: @@ -42,7 +42,7 @@ services: - ./docker/logstash/pki:/etc/pki:ro kibana: - image: docker.elastic.co/kibana/kibana:8.4.0-2e32a640-SNAPSHOT + image: docker.elastic.co/kibana/kibana:8.4.0-722a7d79-SNAPSHOT environment: - "ELASTICSEARCH_USERNAME=kibana_system_user" - "ELASTICSEARCH_PASSWORD=testing" From 90a1f582dcf5d073bf8ddce387fd78e2261e16b4 Mon Sep 17 00:00:00 2001 From: Victor Martinez Date: Fri, 24 Jun 2022 12:38:25 +0100 Subject: [PATCH 017/180] ci: enable flaky test detector (#605) --- .ci/Jenkinsfile | 1 + 1 file changed, 1 insertion(+) diff --git a/.ci/Jenkinsfile b/.ci/Jenkinsfile index 68b3c8a7b51..6c98375e5fb 100644 --- a/.ci/Jenkinsfile +++ b/.ci/Jenkinsfile @@ -263,6 +263,7 @@ pipeline { post { cleanup { 
notifyBuildResult(prComment: true, + analyzeFlakey: !isTag(), jobName: getFlakyJobName(withBranch: (isPR() ? env.CHANGE_TARGET : env.BRANCH_NAME)), githubIssue: isBranch() && currentBuild.currentResult != "SUCCESS", githubLabels: 'Team:Elastic-Agent-Control-Plane') } From 79bb67ec61db59032cbbfd352b29ba867dc4efcf Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Mon, 27 Jun 2022 01:47:56 -0400 Subject: [PATCH 018/180] [Automation] Update elastic stack version to 8.4.0-210dd487 for testing (#620) Co-authored-by: apmmachine --- testing/environments/snapshot.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/testing/environments/snapshot.yml b/testing/environments/snapshot.yml index 8be3d1e8d1b..2a4560af874 100644 --- a/testing/environments/snapshot.yml +++ b/testing/environments/snapshot.yml @@ -3,7 +3,7 @@ version: '2.3' services: elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:8.4.0-722a7d79-SNAPSHOT + image: docker.elastic.co/elasticsearch/elasticsearch:8.4.0-210dd487-SNAPSHOT # When extend is used it merges healthcheck.tests, see: # https://github.com/docker/compose/issues/8962 # healthcheck: @@ -42,7 +42,7 @@ services: - ./docker/logstash/pki:/etc/pki:ro kibana: - image: docker.elastic.co/kibana/kibana:8.4.0-722a7d79-SNAPSHOT + image: docker.elastic.co/kibana/kibana:8.4.0-210dd487-SNAPSHOT environment: - "ELASTICSEARCH_USERNAME=kibana_system_user" - "ELASTICSEARCH_PASSWORD=testing" From f69d5b1fd260e7d64906076c28d98491fd90e346 Mon Sep 17 00:00:00 2001 From: Victor Martinez Date: Mon, 27 Jun 2022 11:13:24 +0100 Subject: [PATCH 019/180] mergify: remove backport automation for non active branches (#615) --- .mergify.yml | 26 -------------------------- 1 file changed, 26 deletions(-) diff --git a/.mergify.yml b/.mergify.yml index 6e1e3c5f651..00b49806e47 100644 --- a/.mergify.yml +++ b/.mergify.yml @@ -181,32 +181,6 @@ pull_request_rules: labels: - "backport" title: "[{{ destination_branch }}](backport #{{ number }}) {{ title }}" - - name: backport patches to 8.0 branch - conditions: - - merged - - label=backport-v8.0.0 - actions: - backport: - assignees: - - "{{ author }}" - branches: - - "8.0" - labels: - - "backport" - title: "[{{ destination_branch }}](backport #{{ number }}) {{ title }}" - - name: backport patches to 8.1 branch - conditions: - - merged - - label=backport-v8.1.0 - actions: - backport: - assignees: - - "{{ author }}" - branches: - - "8.1" - labels: - - "backport" - title: "[{{ destination_branch }}](backport #{{ number }}) {{ title }}" - name: backport patches to 8.2 branch conditions: - merged From 43ba98daa1f4acafa89872419ead60f095b64969 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Manuel=20de=20la=20Pe=C3=B1a?= Date: Mon, 27 Jun 2022 15:08:18 +0200 Subject: [PATCH 020/180] chore: use elastic-agent profile to run the E2E tests (#610) --- .ci/Jenkinsfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.ci/Jenkinsfile b/.ci/Jenkinsfile index 6c98375e5fb..10c6c1a4347 100644 --- a/.ci/Jenkinsfile +++ b/.ci/Jenkinsfile @@ -251,7 +251,7 @@ pipeline { } steps { // TODO: what's the testMatrixFile to be used if any - runE2E(testMatrixFile: '', + runE2E(testMatrixFile: '.ci/.e2e-tests-for-elastic-agent.yaml', beatVersion: "${env.BEAT_VERSION}-SNAPSHOT", elasticAgentVersion: "${env.BEAT_VERSION}-SNAPSHOT", gitHubCheckName: "e2e-tests", From 662a07ac5037f9b098bb28899df4500379759d71 Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> 
Date: Tue, 28 Jun 2022 01:35:09 -0400 Subject: [PATCH 021/180] [Automation] Update elastic stack version to 8.4.0-a6aa9f3b for testing (#631) Co-authored-by: apmmachine --- testing/environments/snapshot.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/testing/environments/snapshot.yml b/testing/environments/snapshot.yml index 2a4560af874..70d8eb97f16 100644 --- a/testing/environments/snapshot.yml +++ b/testing/environments/snapshot.yml @@ -3,7 +3,7 @@ version: '2.3' services: elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:8.4.0-210dd487-SNAPSHOT + image: docker.elastic.co/elasticsearch/elasticsearch:8.4.0-a6aa9f3b-SNAPSHOT # When extend is used it merges healthcheck.tests, see: # https://github.com/docker/compose/issues/8962 # healthcheck: @@ -42,7 +42,7 @@ services: - ./docker/logstash/pki:/etc/pki:ro kibana: - image: docker.elastic.co/kibana/kibana:8.4.0-210dd487-SNAPSHOT + image: docker.elastic.co/kibana/kibana:8.4.0-a6aa9f3b-SNAPSHOT environment: - "ELASTICSEARCH_USERNAME=kibana_system_user" - "ELASTICSEARCH_PASSWORD=testing" From 5c16e0862d357a54269974f443a32fddd6092cf5 Mon Sep 17 00:00:00 2001 From: Anderson Queiroz Date: Wed, 29 Jun 2022 15:05:34 +0200 Subject: [PATCH 022/180] add macros pointing to new agent's repo and fix old macro calls (#458) --- CHANGELOG.asciidoc | 215 +++++++++++++-------------- CHANGELOG.next.asciidoc | 315 ++++++++++++++++++++-------------------- 2 files changed, 268 insertions(+), 262 deletions(-) diff --git a/CHANGELOG.asciidoc b/CHANGELOG.asciidoc index fad7186655f..a2b19fb1a90 100644 --- a/CHANGELOG.asciidoc +++ b/CHANGELOG.asciidoc @@ -1,71 +1,74 @@ // Use these for links to issue and pulls. Note issues and pulls redirect one to // each other on Github, so don't worry too much on using the right prefix. -:issue: https://github.com/elastic/beats/issues/ -:pull: https://github.com/elastic/beats/pull/ +:issue-beats: https://github.com/elastic/beats/issues/ +:pull-beats: https://github.com/elastic/beats/pull/ + +:issue: https://github.com/elastic/elastic-agent/issues/ +:pull: https://github.com/elastic/elastic-agent/pull/ [[release-notes-7.9.0]] === Elastic Agent version 7.9.0 ==== Breaking changes -- Change fleet.yml structure, causes upgraded agent to register as new agent {pull}19248[19248] -- Remove obfuscation of fleet.yml, causes re-enroll of agent to Fleet {pull}19678[19678] -- Rename enroll --ca_sha256 to --ca-sha256 {pull}19900[19900] -- Rename enroll --certificate_authorities to --certificate-authorities {pull}19900[19900] -- Don't build 32 bits version of Elastic Agent. {issue}25533[25533] +- Change fleet.yml structure, causes upgraded agent to register as new agent {pull-beats}[19248] +- Remove obfuscation of fleet.yml, causes re-enroll of agent to Fleet {pull-beats}[19678] +- Rename enroll --ca_sha256 to --ca-sha256 {pull-beats}[19900] +- Rename enroll --certificate_authorities to --certificate-authorities {pull-beats}[19900] +- Don't build 32 bits version of Elastic Agent. {issue-beats}[25533] ==== Bugfixes -- Fix install service script for windows {pull}18814[18814] -- Properly stops subprocess on shutdown {pull}19567[19567] -- Forward revision number of the configuration to the endpoint. 
{pull}19759[19759] -- Remove support for logs type and use logfile {pull}19761[19761] -- Avoid comparing uncomparable types on enroll {issue}19976[19976] -- Fix issues with merging of elastic-agent.yml and fleet.yml {pull}20026[20026] -- Unzip failures on Windows 8/Windows server 2012 {pull}20088[20088] -- Fix failing unit tests on windows {pull}20127[20127] -- Prevent closing closed reader {pull}20214[20214] -- Improve GRPC stop to be more relaxed {pull}20118[20118] -- Fix Windows service installation script {pull}20203[20203] -- Fix timeout issue stopping service applications {pull}20256[20256] -- Fix incorrect hash when upgrading agent {pull}22322[22322] -- Fix refresh of monitoring configuration {pull}23619[23619] -- Fixed nil pointer during unenroll {pull}23609[23609] -- Fixed reenroll scenario {pull}23686[23686] -- Fixed Monitoring filebeat and metricbeat not connecting to Agent over GRPC {pull}23843[23843] -- Fixed make status readable in the log. {pull}23849[23849] -- Windows agent doesn't uninstall with a lowercase `c:` drive in the path {pull}23998[23998] -- Fix reloading of log level for services {pull}24055[24055] -- Fix: Successfully installed and enrolled agent running standalone{pull}24128[24128] -- Make installer atomic on windows {pull}24253[24253] -- Remove installed services on agent uninstall {pull}24151[24151] -- Fix failing installation on windows 7 {pull}24387[24387] -- Fix capabilities resolution in inspect command {pull}24346[24346] -- Fix windows installer during enroll {pull}24343[24343] -- Logging to file disabled on enroll {issue}24173[24173] -- Prevent uninstall failures on empty config {pull}24838[24838] -- Fix issue with FLEET_CA not being used with Fleet Server in container {pull}26529[26529] +- Fix install service script for windows {pull-beats}[18814] +- Properly stops subprocess on shutdown {pull-beats}[19567] +- Forward revision number of the configuration to the endpoint. {pull-beats}[19759] +- Remove support for logs type and use logfile {pull-beats}[19761] +- Avoid comparing uncomparable types on enroll {issue-beats}[19976] +- Fix issues with merging of elastic-agent.yml and fleet.yml {pull-beats}[20026] +- Unzip failures on Windows 8/Windows server 2012 {pull-beats}[20088] +- Fix failing unit tests on windows {pull-beats}[20127] +- Prevent closing closed reader {pull-beats}[20214] +- Improve GRPC stop to be more relaxed {pull-beats}[20118] +- Fix Windows service installation script {pull-beats}[20203] +- Fix timeout issue stopping service applications {pull-beats}[20256] +- Fix incorrect hash when upgrading agent {pull-beats}[22322] +- Fix refresh of monitoring configuration {pull-beats}[23619] +- Fixed nil pointer during unenroll {pull-beats}[23609] +- Fixed reenroll scenario {pull-beats}[23686] +- Fixed Monitoring filebeat and metricbeat not connecting to Agent over GRPC {pull-beats}[23843] +- Fixed make status readable in the log. 
{pull-beats}[23849] +- Windows agent doesn't uninstall with a lowercase `c:` drive in the path {pull-beats}[23998] +- Fix reloading of log level for services {pull-beats}[24055] +- Fix: Successfully installed and enrolled agent running standalone{pull-beats}[24128] +- Make installer atomic on windows {pull-beats}[24253] +- Remove installed services on agent uninstall {pull-beats}[24151] +- Fix failing installation on windows 7 {pull-beats}[24387] +- Fix capabilities resolution in inspect command {pull-beats}[24346] +- Fix windows installer during enroll {pull-beats}[24343] +- Logging to file disabled on enroll {issue-beats}[24173] +- Prevent uninstall failures on empty config {pull-beats}[24838] +- Fix issue with FLEET_CA not being used with Fleet Server in container {pull-beats}[26529] ==== New features -- Change monitoring defaults for agent {pull}18927[18927] -- Agent verifies packages before using them {pull}18876[18876] -- Change stream.* to dataset.* fields {pull}18967[18967] -- Agent now runs the GRPC server and spawned application connect by to Agent {pull}18973[18973] -- Rename input.type logs to logfile {pull}19360[19360] -- Agent now installs/uninstalls Elastic Endpoint {pull}19248[19248] -- Agent now downloads Elastic Endpoint {pull}19503[19503] -- Refuse invalid stream values in configuration {pull}19587[19587] -- Agent now load balances across multiple Kibana instances {pull}19628[19628] -- Configuration cleanup {pull}19848[19848] -- Agent now sends its own logs to elasticsearch {pull}19811[19811] -- Add --insecure option to enroll command {pull}19900[19900] -- Will retry to enroll if the server return a 429. {pull}19918[19811] -- Add --staging option to enroll command {pull}20026[20026] -- Add `event.dataset` to all events {pull}20076[20076] -- Send datastreams fields {pull}20416[20416] -- Agent supports capabilities definition {pull}23848[23848] -- Restart process on output change {pull}24907[24907] +- Change monitoring defaults for agent {pull-beats}[18927] +- Agent verifies packages before using them {pull-beats}[18876] +- Change stream.* to dataset.* fields {pull-beats}[18967] +- Agent now runs the GRPC server and spawned application connect by to Agent {pull-beats}[18973] +- Rename input.type logs to logfile {pull-beats}[19360] +- Agent now installs/uninstalls Elastic Endpoint {pull-beats}[19248] +- Agent now downloads Elastic Endpoint {pull-beats}[19503] +- Refuse invalid stream values in configuration {pull-beats}[19587] +- Agent now load balances across multiple Kibana instances {pull-beats}[19628] +- Configuration cleanup {pull-beats}[19848] +- Agent now sends its own logs to elasticsearch {pull-beats}[19811] +- Add --insecure option to enroll command {pull-beats}[19900] +- Will retry to enroll if the server return a 429. 
{pull-beats}[19811] +- Add --staging option to enroll command {pull-beats}[20026] +- Add `event.dataset` to all events {pull-beats}[20076] +- Send datastreams fields {pull-beats}[20416] +- Agent supports capabilities definition {pull-beats}[23848] +- Restart process on output change {pull-beats}[24907] === Docs @@ -75,61 +78,61 @@ === Elastic Agent version 7.8.0 ==== Breaking changes -- Rename agent to elastic-agent {pull}17391[17391] +- Rename agent to elastic-agent {pull-beats}[17391] ==== Bugfixes -- Fixed tests on windows {pull}16922[16922] -- Fixed installers for SNAPSHOTs and windows {pull}17077[17077] -- Fixed merge of config {pull}17399[17399] -- Handle abs paths on windows correctly {pull}17461[17461] -- Improved cancellation of agent {pull}17318[17318] -- Fixed process spawning on Windows {pull}17751[17751] -- Fix issues when running `mage package` for all the platforms. {pull}17767[17767] -- Rename the User-Agent string from Beats Agent to Elastic Agent. {pull}17765[17765] -- Remove the kbn-version on each request to the Kibana API. {pull}17764[17764] -- Fixed injected log path to monitoring beat {pull}17833[17833] -- Make sure that the Elastic Agent connect over TLS in cloud. {pull}17843[17843] -- Moved stream.* fields to top of event {pull}17858[17858] -- Use /tmp for default monitoring endpoint location for libbeat {pull}18131[18131] -- Use default output by default {pull}18091[18091] -- Fix panic and flaky tests for the Agent. {pull}18135[18135] -- Fix default configuration after enroll {pull}18232[18232] -- Fix make sure the collected logs or metrics include streams information. {pull}18261[18261] -- Fix version to 7.8 {pull}18286[18286] -- Fix an issue where the checkin_frequency, jitter, and backoff options where not configurable. {pull}17843[17843] -- Ensure that the beats uses the params prefer_v2_templates on bulk request. {pull}18318[18318] -- Stop monitoring on config change {pull}18284[18284] -- Enable more granular control of monitoring {pull}18346[18346] -- Fix jq: command not found {pull}18408[18408] -- Avoid Chown on windows {pull}18512[18512] -- Clean action store after enrolling to new configuration {pull}18656[18656] -- Avoid watching monitor logs {pull}18723[18723] -- Correctly report platform and family. {issue}18665[18665] -- Guard against empty stream.datasource and namespace {pull}18769[18769] -- Fix install service script for windows {pull}18814[18814] +- Fixed tests on windows {pull-beats}[16922] +- Fixed installers for SNAPSHOTs and windows {pull-beats}[17077] +- Fixed merge of config {pull-beats}[17399] +- Handle abs paths on windows correctly {pull-beats}[17461] +- Improved cancellation of agent {pull-beats}[17318] +- Fixed process spawning on Windows {pull-beats}[17751] +- Fix issues when running `mage package` for all the platforms. {pull-beats}[17767] +- Rename the User-Agent string from Beats Agent to Elastic Agent. {pull-beats}[17765] +- Remove the kbn-version on each request to the Kibana API. {pull-beats}[17764] +- Fixed injected log path to monitoring beat {pull-beats}[17833] +- Make sure that the Elastic Agent connect over TLS in cloud. {pull-beats}[17843] +- Moved stream.* fields to top of event {pull-beats}[17858] +- Use /tmp for default monitoring endpoint location for libbeat {pull-beats}[18131] +- Use default output by default {pull-beats}[18091] +- Fix panic and flaky tests for the Agent. 
{pull-beats}[18135] +- Fix default configuration after enroll {pull-beats}[18232] +- Fix make sure the collected logs or metrics include streams information. {pull-beats}[18261] +- Fix version to 7.8 {pull-beats}[18286] +- Fix an issue where the checkin_frequency, jitter, and backoff options where not configurable. {pull-beats}[17843] +- Ensure that the beats uses the params prefer_v2_templates on bulk request. {pull-beats}[18318] +- Stop monitoring on config change {pull-beats}[18284] +- Enable more granular control of monitoring {pull-beats}[18346] +- Fix jq: command not found {pull-beats}[18408] +- Avoid Chown on windows {pull-beats}[18512] +- Clean action store after enrolling to new configuration {pull-beats}[18656] +- Avoid watching monitor logs {pull-beats}[18723] +- Correctly report platform and family. {issue-beats}[18665] +- Guard against empty stream.datasource and namespace {pull-beats}[18769] +- Fix install service script for windows {pull-beats}[18814] ==== New features -- Generate index name in a format type-dataset-namespace {pull}16903[16903] -- OS agnostic default configuration {pull}17016[17016] -- Introduced post install hooks {pull}17241[17241] -- Support for config constraints {pull}17112[17112] -- Introduced `mage demo` command {pull}17312[17312] -- Display the stability of the agent at enroll and start. {pull}17336[17336] -- Expose stream.* variables in events {pull}17468[17468] -- Monitoring configuration reloadable {pull}17855[17855] -- Pack ECS metadata to request payload send to fleet {pull}17894[17894] -- Allow CLI overrides of paths {pull}17781[17781] -- Enable Filebeat input: S3, Azureeventhub, cloudfoundry, httpjson, netflow, o365audit. {pull}17909[17909] -- Configurable log level {pull}18083[18083] -- Use data subfolder as default for process logs {pull}17960[17960] -- Enable introspecting configuration {pull}18124[18124] -- Follow home path for all config files {pull}18161[18161] -- Do not require unnecessary configuration {pull}18003[18003] -- Use nested objects so fleet can handle metadata correctly {pull}18234[18234] -- Enable debug log level for Metricbeat and Filebeat when run under the Elastic Agent. {pull}17935[17935] -- Pick up version from libbeat {pull}18350[18350] -- More clear output of inspect command {pull}18405[18405] -- When not port are specified and the https is used fallback to 443 {pull}18844[18844] -- Basic upgrade process {pull}21002[21002] +- Generate index name in a format type-dataset-namespace {pull-beats}[16903] +- OS agnostic default configuration {pull-beats}[17016] +- Introduced post install hooks {pull-beats}[17241] +- Support for config constraints {pull-beats}[17112] +- Introduced `mage demo` command {pull-beats}[17312] +- Display the stability of the agent at enroll and start. {pull-beats}[17336] +- Expose stream.* variables in events {pull-beats}[17468] +- Monitoring configuration reloadable {pull-beats}[17855] +- Pack ECS metadata to request payload send to fleet {pull-beats}[17894] +- Allow CLI overrides of paths {pull-beats}[17781] +- Enable Filebeat input: S3, Azureeventhub, cloudfoundry, httpjson, netflow, o365audit. 
{pull-beats}[17909] +- Configurable log level {pull-beats}[18083] +- Use data subfolder as default for process logs {pull-beats}[17960] +- Enable introspecting configuration {pull-beats}[18124] +- Follow home path for all config files {pull-beats}[18161] +- Do not require unnecessary configuration {pull-beats}[18003] +- Use nested objects so fleet can handle metadata correctly {pull-beats}[18234] +- Enable debug log level for Metricbeat and Filebeat when run under the Elastic Agent. {pull-beats}[17935] +- Pick up version from libbeat {pull-beats}[18350] +- More clear output of inspect command {pull-beats}[18405] +- When not port are specified and the https is used fallback to 443 {pull-beats}[18844] +- Basic upgrade process {pull-beats}[21002] diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index cbe894d63c5..00f91675544 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -1,101 +1,104 @@ // Use these for links to issue and pulls. Note issues and pulls redirect one to // each other on Github, so don't worry too much on using the right prefix. -:issue: https://github.com/elastic/beats/issues/ -:pull: https://github.com/elastic/beats/pull/ +:issue-beats: https://github.com/elastic/beats/issues/ +:pull-beats: https://github.com/elastic/beats/pull/ + +:issue: https://github.com/elastic/elastic-agent/issues/ +:pull: https://github.com/elastic/elastic-agent/pull/ === Elastic Agent version HEAD ==== Breaking changes -- Docker container is not run as root by default. {pull}21213[21213] -- Read Fleet connection information from `fleet.*` instead of `fleet.kibana.*`. {pull}24713[24713] -- Beats build for 32Bit Windows or Linux system will refuse to run on a 64bit system. {pull}25186[25186] -- Remove the `--kibana-url` from `install` and `enroll` command. {pull}25529[25529] -- Default to port 80 and 443 for Kibana and Fleet Server connections. {pull}25723[25723] -- Remove deprecated/undocumented IncludeCreatorMetadata setting from kubernetes metadata config options {pull}28006[28006] -- The `/processes/` endpoint proxies to the subprocess's monitoring endpoint, instead of querying its `/stats` endpoint {pull}28165[28165] -- Remove username/password for fleet-server authentication. {pull}29458[29458] +- Docker container is not run as root by default. {pull-beats}[21213] +- Read Fleet connection information from `fleet.*` instead of `fleet.kibana.*`. {pull-beats}[24713] +- Beats build for 32Bit Windows or Linux system will refuse to run on a 64bit system. {pull-beats}[25186] +- Remove the `--kibana-url` from `install` and `enroll` command. {pull-beats}[25529] +- Default to port 80 and 443 for Kibana and Fleet Server connections. {pull-beats}[25723] +- Remove deprecated/undocumented IncludeCreatorMetadata setting from kubernetes metadata config options {pull-beats}[28006] +- The `/processes/` endpoint proxies to the subprocess's monitoring endpoint, instead of querying its `/stats` endpoint {pull-beats}[28165] +- Remove username/password for fleet-server authentication. {pull-beats}[29458] ==== Bugfixes -- Fix rename *ConfigChange to *PolicyChange to align on changes in the UI. 
{pull}20779[20779] -- Thread safe sorted set {pull}21290[21290] -- Copy Action store on upgrade {pull}21298[21298] -- Include inputs in action store actions {pull}21298[21298] -- Fix issue where inputs without processors defined would panic {pull}21628[21628] -- Prevent reporting ecs version twice {pull}21616[21616] -- Partial extracted beat result in failure to spawn beat {issue}21718[21718] -- Use symlink path for reexecutions {pull}21835[21835] -- Use ML_SYSTEM to detect if agent is running as a service {pull}21884[21884] -- Use local temp instead of system one {pull}21883[21883] -- Rename monitoring index from `elastic.agent` to `elastic_agent` {pull}21932[21932] -- Fix issue with named pipes on Windows 7 {pull}21931[21931] -- Fix missing elastic_agent event data {pull}21994[21994] -- Ensure shell wrapper path exists before writing wrapper on install {pull}22144[22144] -- Fix deb/rpm packaging for Elastic Agent {pull}22153[22153] -- Fix composable input processor promotion to fix duplicates {pull}22344[22344] -- Fix sysv init files for deb/rpm installation {pull}22543[22543] -- Fix shell wrapper for deb/rpm packaging {pull}23038[23038] -- Fixed parsing of npipe URI {pull}22978[22978] -- Select default agent policy if no enrollment token provided. {pull}23973[23973] -- Remove artifacts on transient download errors {pull}23235[23235] -- Support for linux/arm64 {pull}23479[23479] -- Skip top level files when unziping archive during upgrade {pull}23456[23456] -- Do not take ownership of Endpoint log path {pull}23444[23444] -- Fixed fetching DBus service PID {pull}23496[23496] -- Fix issue of missing log messages from filebeat monitor {pull}23514[23514] -- Increase checkin grace period to 30 seconds {pull}23568[23568] -- Fix libbeat from reporting back degraded on config update {pull}23537[23537] -- Rewrite check if agent is running with admin rights on Windows {pull}23970[23970] -- Fix issues with dynamic inputs and conditions {pull}23886[23886] -- Fix bad substitution of API key. {pull}24036[24036] -- Fix docker enrollment issue related to Fleet Server change. {pull}24155[24155] -- Improve log on failure of Endpoint Security installation. {pull}24429[24429] -- Verify communication to Kibana before updating Fleet client. {pull}24489[24489] -- Fix nil pointer when null is generated as list item. {issue}23734[23734] -- Add support for filestream input. {pull}24820[24820] -- Add check for URL set when cert and cert key. 
{pull}24904[24904] -- Fix install command for Fleet Server bootstrap, remove need for --enrollment-token when using --fleet-server {pull}24981[24981] -- Respect host configuration for exposed processes endpoint {pull}25114[25114] -- Set --inscure in container when FLEET_SERVER_ENABLE and FLEET_INSECURE set {pull}25137[25137] -- Fixed: limit for retries to Kibana configurable {issue}25063[25063] -- Fix issue with status and inspect inside of container {pull}25204[25204] -- Remove FLEET_SERVER_POLICY_NAME env variable as it was not used {pull}25149[25149] -- Reduce log level for listener cleanup to debug {pull}25274 -- Passing in policy id to container command works {pull}25352[25352] -- Reduce log level for listener cleanup to debug {pull}25274[25274] -- Delay the restart of application when a status report of failure is given {pull}25339[25339] -- Don't log when upgrade capability doesn't apply {pull}25386[25386] -- Fixed issue when unversioned home is set and invoked watcher failing with ENOENT {issue}25371[25371] -- Fixed Elastic Agent: expecting Dict and received *transpiler.Key for '0' {issue}24453[24453] -- Fix AckBatch to do nothing when no actions passed {pull}25562[25562] -- Add error log entry when listener creation fails {issue}23483[23482] -- Handle case where policy doesn't contain Fleet connection information {pull}25707[25707] -- Fix fleet-server.yml spec to not overwrite existing keys {pull}25741[25741] -- Agent sends wrong log level to Endpoint {issue}25583[25583] -- Fix startup with failing configuration {pull}26057[26057] -- Change timestamp in elatic-agent-json.log to use UTC {issue}25391[25391] -- Fix add support for Logstash output. {pull}24305[24305] -- Do not log Elasticsearch configuration for monitoring output when running with debug. {pull}26583[26583] -- Fix issue where proxy enrollment options broke enrollment command. {pull}26749[26749] -- Remove symlink.prev from previously failed upgrade {pull}26785[26785] -- Fix apm-server supported outputs not being in sync with supported output types. {pull}26885[26885] -- Set permissions during installation {pull}26665[26665] -- Disable monitoring during fleet-server bootstrapping. {pull}27222[27222] -- Fix issue with atomic extract running in K8s {pull}27396[27396] -- Fix issue with install directory in state path in K8s {pull}27396[27396] -- Disable monitoring during fleet-server bootstrapping. {pull}27222[27222] -- Change output.elasticsearch.proxy_disabled flag to output.elasticsearch.proxy_disable so fleet uses it. {issue}27670[27670] {pull}27671[27671] -- Add validation for certificate flags to ensure they are absolute paths. {pull}27779[27779] -- Migrate state on upgrade {pull}27825[27825] -- Add "_monitoring" suffix to monitoring instance names to remove ambiguity with the status command. {issue}25449[25449] -- Ignore ErrNotExists when fixing permissions. {issue}27836[27836] {pull}27846[27846] -- Snapshot artifact lookup will use agent.download proxy settings. {issue}27903[27903] {pull}27904[27904] -- Fix lazy acker to only add new actions to the batch. {pull}27981[27981] -- Allow HTTP metrics to run in bootstrap mode. Add ability to adjust timeouts for Fleet Server. {pull}28260[28260] -- Fix agent configuration overwritten by default fleet config. {pull}29297[29297] -- Allow agent containers to use basic auth to create a service token. {pull}29651[29651] -- Fix issue where a failing artifact verification does not remove the bad artifact. 
{pull}30281[30281] -- Reduce Elastic Agent shut down time by stopping processes concurrently {pull}29650[29650] +- Fix rename *ConfigChange to *PolicyChange to align on changes in the UI. {pull-beats}[20779] +- Thread safe sorted set {pull-beats}[21290] +- Copy Action store on upgrade {pull-beats}[21298] +- Include inputs in action store actions {pull-beats}[21298] +- Fix issue where inputs without processors defined would panic {pull-beats}[21628] +- Prevent reporting ecs version twice {pull-beats}[21616] +- Partial extracted beat result in failure to spawn beat {issue-beats}[21718] +- Use symlink path for reexecutions {pull-beats}[21835] +- Use ML_SYSTEM to detect if agent is running as a service {pull-beats}[21884] +- Use local temp instead of system one {pull-beats}[21883] +- Rename monitoring index from `elastic.agent` to `elastic_agent` {pull-beats}[21932] +- Fix issue with named pipes on Windows 7 {pull-beats}[21931] +- Fix missing elastic_agent event data {pull-beats}[21994] +- Ensure shell wrapper path exists before writing wrapper on install {pull-beats}[22144] +- Fix deb/rpm packaging for Elastic Agent {pull-beats}[22153] +- Fix composable input processor promotion to fix duplicates {pull-beats}[22344] +- Fix sysv init files for deb/rpm installation {pull-beats}[22543] +- Fix shell wrapper for deb/rpm packaging {pull-beats}[23038] +- Fixed parsing of npipe URI {pull-beats}[22978] +- Select default agent policy if no enrollment token provided. {pull-beats}[23973] +- Remove artifacts on transient download errors {pull-beats}[23235] +- Support for linux/arm64 {pull-beats}[23479] +- Skip top level files when unziping archive during upgrade {pull-beats}[23456] +- Do not take ownership of Endpoint log path {pull-beats}[23444] +- Fixed fetching DBus service PID {pull-beats}[23496] +- Fix issue of missing log messages from filebeat monitor {pull-beats}[23514] +- Increase checkin grace period to 30 seconds {pull-beats}[23568] +- Fix libbeat from reporting back degraded on config update {pull-beats}[23537] +- Rewrite check if agent is running with admin rights on Windows {pull-beats}[23970] +- Fix issues with dynamic inputs and conditions {pull-beats}[23886] +- Fix bad substitution of API key. {pull-beats}[24036] +- Fix docker enrollment issue related to Fleet Server change. {pull-beats}[24155] +- Improve log on failure of Endpoint Security installation. {pull-beats}[24429] +- Verify communication to Kibana before updating Fleet client. {pull-beats}[24489] +- Fix nil pointer when null is generated as list item. {issue-beats}[23734] +- Add support for filestream input. {pull-beats}[24820] +- Add check for URL set when cert and cert key. 
{pull-beats}[24904] +- Fix install command for Fleet Server bootstrap, remove need for --enrollment-token when using --fleet-server {pull-beats}[24981] +- Respect host configuration for exposed processes endpoint {pull-beats}[25114] +- Set --inscure in container when FLEET_SERVER_ENABLE and FLEET_INSECURE set {pull-beats}[25137] +- Fixed: limit for retries to Kibana configurable {issue-beats}[25063] +- Fix issue with status and inspect inside of container {pull-beats}[25204] +- Remove FLEET_SERVER_POLICY_NAME env variable as it was not used {pull-beats}[25149] +- Reduce log level for listener cleanup to debug {pull-beats} +- Passing in policy id to container command works {pull-beats}[25352] +- Reduce log level for listener cleanup to debug {pull-beats}[25274] +- Delay the restart of application when a status report of failure is given {pull-beats}[25339] +- Don't log when upgrade capability doesn't apply {pull-beats}[25386] +- Fixed issue when unversioned home is set and invoked watcher failing with ENOENT {issue-beats}[25371] +- Fixed Elastic Agent: expecting Dict and received *transpiler.Key for '0' {issue-beats}[24453] +- Fix AckBatch to do nothing when no actions passed {pull-beats}[25562] +- Add error log entry when listener creation fails {issue-beats}[23482] +- Handle case where policy doesn't contain Fleet connection information {pull-beats}[25707] +- Fix fleet-server.yml spec to not overwrite existing keys {pull-beats}[25741] +- Agent sends wrong log level to Endpoint {issue-beats}[25583] +- Fix startup with failing configuration {pull-beats}[26057] +- Change timestamp in elatic-agent-json.log to use UTC {issue-beats}[25391] +- Fix add support for Logstash output. {pull-beats}[24305] +- Do not log Elasticsearch configuration for monitoring output when running with debug. {pull-beats}[26583] +- Fix issue where proxy enrollment options broke enrollment command. {pull-beats}[26749] +- Remove symlink.prev from previously failed upgrade {pull-beats}[26785] +- Fix apm-server supported outputs not being in sync with supported output types. {pull-beats}[26885] +- Set permissions during installation {pull-beats}[26665] +- Disable monitoring during fleet-server bootstrapping. {pull-beats}[27222] +- Fix issue with atomic extract running in K8s {pull-beats}[27396] +- Fix issue with install directory in state path in K8s {pull-beats}[27396] +- Disable monitoring during fleet-server bootstrapping. {pull-beats}[27222] +- Change output.elasticsearch.proxy_disabled flag to output.elasticsearch.proxy_disable so fleet uses it. {issue-beats}[27670] {pull-beats}[27671] +- Add validation for certificate flags to ensure they are absolute paths. {pull-beats}[27779] +- Migrate state on upgrade {pull-beats}[27825] +- Add "_monitoring" suffix to monitoring instance names to remove ambiguity with the status command. {issue-beats}[25449] +- Ignore ErrNotExists when fixing permissions. {issue-beats}[27836] {pull-beats}[27846] +- Snapshot artifact lookup will use agent.download proxy settings. {issue-beats}[27903] {pull-beats}[27904] +- Fix lazy acker to only add new actions to the batch. {pull-beats}[27981] +- Allow HTTP metrics to run in bootstrap mode. Add ability to adjust timeouts for Fleet Server. {pull-beats}[28260] +- Fix agent configuration overwritten by default fleet config. {pull-beats}[29297] +- Allow agent containers to use basic auth to create a service token. {pull-beats}[29651] +- Fix issue where a failing artifact verification does not remove the bad artifact. 
{pull-beats}[30281] +- Reduce Elastic Agent shut down time by stopping processes concurrently {pull-beats}[29650] - Move `context cancelled` error from fleet gateway into debug level. {pull}187[187] - Update library containerd to 1.5.10. {pull}186[186] - Add fleet-server to output of elastic-agent inspect output command (and diagnostic bundle). {pull}243[243] @@ -106,75 +109,75 @@ ==== New features -- Prepare packaging for endpoint and asc files {pull}20186[20186] -- Improved version CLI {pull}20359[20359] -- Enroll CLI now restarts running daemon {pull}20359[20359] -- Add restart CLI cmd {pull}20359[20359] -- Add new `synthetics/*` inputs to run Heartbeat {pull}20387[20387] -- Users of the Docker image can now pass `FLEET_ENROLL_INSECURE=1` to include the `--insecure` flag with the `elastic-agent enroll` command {issue}20312[20312] {pull}20713[20713] -- Add `docker` composable dynamic provider. {pull}20842[20842] -- Add support for dynamic inputs with providers and `{{variable|"default"}}` substitution. {pull}20839[20839] -- Add support for EQL based condition on inputs {pull}20994[20994] -- Send `fleet.host.id` to Endpoint Security {pull}21042[21042] -- Add `install` and `uninstall` subcommands {pull}21206[21206] -- Use new form of fleet API paths {pull}21478[21478] -- Add `kubernetes` composable dynamic provider. {pull}21480[21480] -- Send updating state {pull}21461[21461] -- Add `elastic.agent.id` and `elastic.agent.version` to published events from filebeat and metricbeat {pull}21543[21543] -- Add `upgrade` subcommand to perform upgrade of installed Elastic Agent {pull}21425[21425] -- Update `fleet.yml` and Kibana hosts when a policy change updates the Kibana hosts {pull}21599[21599] -- Update `install` command to perform enroll before starting Elastic Agent {pull}21772[21772] -- Update `fleet.kibana.path` from a POLICY_CHANGE {pull}21804[21804] -- Removed `install-service.ps1` and `uninstall-service.ps1` from Windows .zip packaging {pull}21694[21694] -- Add `priority` to `AddOrUpdate` on dynamic composable input providers communication channel {pull}22352[22352] -- Ship `endpoint-security` logs to elasticsearch {pull}22526[22526] -- Log level reloadable from fleet {pull}22690[22690] -- Push log level downstream {pull}22815[22815] -- Add metrics collection for Agent {pull}22793[22793] -- Add support for Fleet Server {pull}23736[23736] -- Add support for enrollment with local bootstrap of Fleet Server {pull}23865[23865] -- Add TLS support for Fleet Server {pull}24142[24142] -- Add support for Fleet Server running under Elastic Agent {pull}24220[24220] -- Add CA support to Elastic Agent docker image {pull}24486[24486] -- Add k8s secrets provider for Agent {pull}24789[24789] -- Add STATE_PATH, CONFIG_PATH, LOGS_PATH to Elastic Agent docker image {pull}24817[24817] -- Add status subcommand {pull}24856[24856] -- Add leader_election provider for k8s {pull}24267[24267] -- Add --fleet-server-service-token and FLEET_SERVER_SERVICE_TOKEN options {pull}25083[25083] -- Keep http and logging config during enroll {pull}25132[25132] -- Log output of container to $LOGS_PATH/elastic-agent-start.log when LOGS_PATH set {pull}25150[25150] -- Use `filestream` input for internal log collection. {pull}25660[25660] -- Enable agent to send custom headers to kibana/ES {pull}26275[26275] -- Set `agent.id` to the Fleet Agent ID in events published from inputs backed by Beats. {issue}21121[21121] {pull}26394[26394] {pull}26548[26548] -- Add proxy support to artifact downloader and communication with fleet server. 
{pull}25219[25219] -- Add proxy support to enroll command. {pull}26514[26514] -- Enable configuring monitoring namespace {issue}26439[26439] -- Communicate with Fleet Server over HTTP2. {pull}26474[26474] -- Pass logging.metrics.enabled to beats to stop beats from adding metrics into their logs. {issue}26758[26758] {pull}26828[26828] -- Support Node and Service autodiscovery in kubernetes dynamic provider. {pull}26801[26801] -- Increase Agent's mem limits in k8s. {pull}27153[27153] -- Add new --enroll-delay option for install and enroll commands. {pull}27118[27118] -- Add link to troubleshooting guide on fatal exits. {issue}26367[26367] {pull}27236[27236] -- Agent now adapts the beats queue size based on output settings. {issue}26638[26638] {pull}27429[27429] -- Support ephemeral containers in Kubernetes dynamic provider. {issue}27020[#27020] {pull}27707[27707] -- Add complete k8s metadata through composable provider. {pull}27691[27691] -- Add diagnostics command to gather beat metadata. {pull}28265[28265] -- Add diagnostics collect command to gather beat metadata, config, policy, and logs and bundle it into an archive. {pull}28461[28461] -- Add `KIBANA_FLEET_SERVICE_TOKEN` to Elastic Agent container. {pull}28096[28096] -- Enable pprof endpoints for beats processes. Allow pprof endpoints for elastic-agent if enabled. {pull}28983[28983] -- Add `--pprof` flag to `elastic-agent diagnostics` and an `elastic-agent pprof` command to allow operators to gather pprof data from the agent and beats running under it. {pull}28798[28798] -- Allow pprof endpoints for elastic-agent or beats if enabled. {pull}28983[28983] {pull}29155[29155] -- Add --fleet-server-es-ca-trusted-fingerprint flag to allow agent/fleet-server to work with elasticsearch clusters using self signed certs. {pull}29128[29128] -- Discover changes in Kubernetes nodes metadata as soon as they happen. {pull}23139[23139] -- Add results of inspect output command into archive produced by diagnostics collect. {pull}29902[29902] -- Add support for loading input configuration from external configuration files in standalone mode. You can load inputs from YAML configuration files under the folder `{path.config}/inputs.d`. {pull}30087[30087] -- Install command will skip install/uninstall steps when installation via package is detected on Linux distros. {pull}30289[30289] -- Update docker/distribution dependency library to fix a security issues concerning OCI Manifest Type Confusion Issue. {pull}30462[30462] -- Add action_input_type for the .fleet-actions-results {pull}30562[30562] -- Add support for enabling the metrics buffer endpoint in the elastic-agent and beats it runs. diagnostics collect command will gather metrics-buffer data if enabled. {pull}30471[30471] +- Prepare packaging for endpoint and asc files {pull-beats}[20186] +- Improved version CLI {pull-beats}[20359] +- Enroll CLI now restarts running daemon {pull-beats}[20359] +- Add restart CLI cmd {pull-beats}[20359] +- Add new `synthetics/*` inputs to run Heartbeat {pull-beats}[20387] +- Users of the Docker image can now pass `FLEET_ENROLL_INSECURE=1` to include the `--insecure` flag with the `elastic-agent enroll` command {issue-beats}[20312] {pull-beats}[20713] +- Add `docker` composable dynamic provider. {pull-beats}[20842] +- Add support for dynamic inputs with providers and `{{variable|"default"}}` substitution. 
{pull-beats}[20839] +- Add support for EQL based condition on inputs {pull-beats}[20994] +- Send `fleet.host.id` to Endpoint Security {pull-beats}[21042] +- Add `install` and `uninstall` subcommands {pull-beats}[21206] +- Use new form of fleet API paths {pull-beats}[21478] +- Add `kubernetes` composable dynamic provider. {pull-beats}[21480] +- Send updating state {pull-beats}[21461] +- Add `elastic.agent.id` and `elastic.agent.version` to published events from filebeat and metricbeat {pull-beats}[21543] +- Add `upgrade` subcommand to perform upgrade of installed Elastic Agent {pull-beats}[21425] +- Update `fleet.yml` and Kibana hosts when a policy change updates the Kibana hosts {pull-beats}[21599] +- Update `install` command to perform enroll before starting Elastic Agent {pull-beats}[21772] +- Update `fleet.kibana.path` from a POLICY_CHANGE {pull-beats}[21804] +- Removed `install-service.ps1` and `uninstall-service.ps1` from Windows .zip packaging {pull-beats}[21694] +- Add `priority` to `AddOrUpdate` on dynamic composable input providers communication channel {pull-beats}[22352] +- Ship `endpoint-security` logs to elasticsearch {pull-beats}[22526] +- Log level reloadable from fleet {pull-beats}[22690] +- Push log level downstream {pull-beats}[22815] +- Add metrics collection for Agent {pull-beats}[22793] +- Add support for Fleet Server {pull-beats}[23736] +- Add support for enrollment with local bootstrap of Fleet Server {pull-beats}[23865] +- Add TLS support for Fleet Server {pull-beats}[24142] +- Add support for Fleet Server running under Elastic Agent {pull-beats}[24220] +- Add CA support to Elastic Agent docker image {pull-beats}[24486] +- Add k8s secrets provider for Agent {pull-beats}[24789] +- Add STATE_PATH, CONFIG_PATH, LOGS_PATH to Elastic Agent docker image {pull-beats}[24817] +- Add status subcommand {pull-beats}[24856] +- Add leader_election provider for k8s {pull-beats}[24267] +- Add --fleet-server-service-token and FLEET_SERVER_SERVICE_TOKEN options {pull-beats}[25083] +- Keep http and logging config during enroll {pull-beats}[25132] +- Log output of container to $LOGS_PATH/elastic-agent-start.log when LOGS_PATH set {pull-beats}[25150] +- Use `filestream` input for internal log collection. {pull-beats}[25660] +- Enable agent to send custom headers to kibana/ES {pull-beats}[26275] +- Set `agent.id` to the Fleet Agent ID in events published from inputs backed by Beats. {issue-beats}[21121] {pull-beats}[26394] {pull-beats}[26548] +- Add proxy support to artifact downloader and communication with fleet server. {pull-beats}[25219] +- Add proxy support to enroll command. {pull-beats}[26514] +- Enable configuring monitoring namespace {issue-beats}[26439] +- Communicate with Fleet Server over HTTP2. {pull-beats}[26474] +- Pass logging.metrics.enabled to beats to stop beats from adding metrics into their logs. {issue-beats}[26758] {pull-beats}[26828] +- Support Node and Service autodiscovery in kubernetes dynamic provider. {pull-beats}[26801] +- Increase Agent's mem limits in k8s. {pull-beats}[27153] +- Add new --enroll-delay option for install and enroll commands. {pull-beats}[27118] +- Add link to troubleshooting guide on fatal exits. {issue-beats}[26367] {pull-beats}[27236] +- Agent now adapts the beats queue size based on output settings. {issue-beats}[26638] {pull-beats}[27429] +- Support ephemeral containers in Kubernetes dynamic provider. {issue-beats}[#27020] {pull-beats}[27707] +- Add complete k8s metadata through composable provider. 
{pull-beats}[27691] +- Add diagnostics command to gather beat metadata. {pull-beats}[28265] +- Add diagnostics collect command to gather beat metadata, config, policy, and logs and bundle it into an archive. {pull-beats}[28461] +- Add `KIBANA_FLEET_SERVICE_TOKEN` to Elastic Agent container. {pull-beats}[28096] +- Enable pprof endpoints for beats processes. Allow pprof endpoints for elastic-agent if enabled. {pull-beats}[28983] +- Add `--pprof` flag to `elastic-agent diagnostics` and an `elastic-agent pprof` command to allow operators to gather pprof data from the agent and beats running under it. {pull-beats}[28798] +- Allow pprof endpoints for elastic-agent or beats if enabled. {pull-beats}[28983] {pull-beats}[29155] +- Add --fleet-server-es-ca-trusted-fingerprint flag to allow agent/fleet-server to work with elasticsearch clusters using self signed certs. {pull-beats}[29128] +- Discover changes in Kubernetes nodes metadata as soon as they happen. {pull-beats}[23139] +- Add results of inspect output command into archive produced by diagnostics collect. {pull-beats}[29902] +- Add support for loading input configuration from external configuration files in standalone mode. You can load inputs from YAML configuration files under the folder `{path.config}/inputs.d`. {pull-beats}[30087] +- Install command will skip install/uninstall steps when installation via package is detected on Linux distros. {pull-beats}[30289] +- Update docker/distribution dependency library to fix a security issues concerning OCI Manifest Type Confusion Issue. {pull-beats}[30462] +- Add action_input_type for the .fleet-actions-results {pull-beats}[30562] +- Add support for enabling the metrics buffer endpoint in the elastic-agent and beats it runs. diagnostics collect command will gather metrics-buffer data if enabled. {pull-beats}[30471] - Update ack response schema and processing, add retrier for acks {pull}200[200] - Enhance error messages and logs for process start {pull}225[225] -- Changed the default policy selection logic. When the agent has no policy id or name defined, it will fall back to defaults (defined by $FLEET_SERVER_POLICY_ID and $FLEET_DEFAULT_TOKEN_POLICY_NAME environment variables respectively). {issue}29774[29774] {pull}226[226] +- Changed the default policy selection logic. When the agent has no policy id or name defined, it will fall back to defaults (defined by $FLEET_SERVER_POLICY_ID and $FLEET_DEFAULT_TOKEN_POLICY_NAME environment variables respectively). {issue-beats}[29774] {pull}226[226] - Add Elastic APM instrumentation {pull}180[180] - Agent can be built for `darwin/arm64`. When it's built for both `darwin/arm64` and `darwin/adm64` a universal binary is also built and packaged. {pull}203[203] - Add support for Cloudbeat. 
{pull}179[179]

From eb6fa029d0dad53ce56f210af08741167b7e3416 Mon Sep 17 00:00:00 2001
From: Daniel Araujo Almeida
Date: Wed, 29 Jun 2022 10:17:37 -0600
Subject: [PATCH 023/180] Add mount of /etc/machine-id for managed Agent in k8s (#530)

---
 deploy/kubernetes/elastic-agent-managed-kubernetes.yaml | 7 +++++++
 .../elastic-agent-managed-daemonset.yaml                | 7 +++++++
 2 files changed, 14 insertions(+)

diff --git a/deploy/kubernetes/elastic-agent-managed-kubernetes.yaml b/deploy/kubernetes/elastic-agent-managed-kubernetes.yaml
index 882e7b46e21..761d659771a 100644
--- a/deploy/kubernetes/elastic-agent-managed-kubernetes.yaml
+++ b/deploy/kubernetes/elastic-agent-managed-kubernetes.yaml
@@ -85,6 +85,9 @@ spec:
         - name: etcsysmd
           mountPath: /hostfs/etc/systemd
           readOnly: true
+        - name: etc-mid
+          mountPath: /etc/machine-id
+          readOnly: true
       volumes:
         - name: proc
           hostPath:
@@ -113,6 +116,10 @@ spec:
         - name: etcsysmd
           hostPath:
             path: /etc/systemd
+        - name: etc-mid
+          hostPath:
+            path: /etc/machine-id
+            type: File
 ---
 apiVersion: rbac.authorization.k8s.io/v1
 kind: ClusterRoleBinding
diff --git a/deploy/kubernetes/elastic-agent-managed/elastic-agent-managed-daemonset.yaml b/deploy/kubernetes/elastic-agent-managed/elastic-agent-managed-daemonset.yaml
index 097d9786e03..ee7b3dc3219 100644
--- a/deploy/kubernetes/elastic-agent-managed/elastic-agent-managed-daemonset.yaml
+++ b/deploy/kubernetes/elastic-agent-managed/elastic-agent-managed-daemonset.yaml
@@ -85,6 +85,9 @@ spec:
         - name: etcsysmd
           mountPath: /hostfs/etc/systemd
           readOnly: true
+        - name: etc-mid
+          mountPath: /etc/machine-id
+          readOnly: true
       volumes:
         - name: proc
           hostPath:
@@ -113,3 +116,7 @@ spec:
         - name: etcsysmd
           hostPath:
             path: /etc/systemd
+        - name: etc-mid
+          hostPath:
+            path: /etc/machine-id
+            type: File

From 5f60fd0d8f2e8da6187ea2d0af6f9ab7e1c1b29e Mon Sep 17 00:00:00 2001
From: Daniel Araujo Almeida
Date: Wed, 29 Jun 2022 10:18:10 -0600
Subject: [PATCH 024/180] Set hostPID=true for managed agent in k8s (#528)

* Set hostPID=true for managed agent in k8s

* Add comment on hostPID.
---
 deploy/kubernetes/elastic-agent-managed-kubernetes.yaml        | 3 +++
 .../elastic-agent-managed/elastic-agent-managed-daemonset.yaml | 3 +++
 2 files changed, 6 insertions(+)

diff --git a/deploy/kubernetes/elastic-agent-managed-kubernetes.yaml b/deploy/kubernetes/elastic-agent-managed-kubernetes.yaml
index 761d659771a..4771cf37727 100644
--- a/deploy/kubernetes/elastic-agent-managed-kubernetes.yaml
+++ b/deploy/kubernetes/elastic-agent-managed-kubernetes.yaml
@@ -19,6 +19,9 @@ spec:
           effect: NoSchedule
       serviceAccountName: elastic-agent
       hostNetwork: true
+      # Sharing the host process ID namespace gives visibility of all processes running on the same host.
+      # This enables the Elastic Security integration to observe all process exec events on the host.
+      hostPID: true
      dnsPolicy: ClusterFirstWithHostNet
       containers:
         - name: elastic-agent
diff --git a/deploy/kubernetes/elastic-agent-managed/elastic-agent-managed-daemonset.yaml b/deploy/kubernetes/elastic-agent-managed/elastic-agent-managed-daemonset.yaml
index ee7b3dc3219..231b976fe71 100644
--- a/deploy/kubernetes/elastic-agent-managed/elastic-agent-managed-daemonset.yaml
+++ b/deploy/kubernetes/elastic-agent-managed/elastic-agent-managed-daemonset.yaml
@@ -19,6 +19,9 @@ spec:
           effect: NoSchedule
       serviceAccountName: elastic-agent
       hostNetwork: true
+      # Sharing the host process ID namespace gives visibility of all processes running on the same host.
+      # This enables the Elastic Security integration to observe all process exec events on the host.
+      hostPID: true
       dnsPolicy: ClusterFirstWithHostNet
       containers:
         - name: elastic-agent

From 7a4fa6bb4f79e48cdab12d540ea8e028e7f0a2f2 Mon Sep 17 00:00:00 2001
From: apmmachine <58790750+apmmachine@users.noreply.github.com>
Date: Thu, 30 Jun 2022 01:35:53 -0400
Subject: [PATCH 025/180] [Automation] Update elastic stack version to 8.4.0-86cc80f3 for testing (#648)

Co-authored-by: apmmachine
---
 testing/environments/snapshot.yml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/testing/environments/snapshot.yml b/testing/environments/snapshot.yml
index 70d8eb97f16..94af4334d03 100644
--- a/testing/environments/snapshot.yml
+++ b/testing/environments/snapshot.yml
@@ -3,7 +3,7 @@ version: '2.3'
 services:
   elasticsearch:
-    image: docker.elastic.co/elasticsearch/elasticsearch:8.4.0-a6aa9f3b-SNAPSHOT
+    image: docker.elastic.co/elasticsearch/elasticsearch:8.4.0-86cc80f3-SNAPSHOT
     # When extend is used it merges healthcheck.tests, see:
     # https://github.com/docker/compose/issues/8962
     # healthcheck:
@@ -42,7 +42,7 @@ services:
     - ./docker/logstash/pki:/etc/pki:ro
 
   kibana:
-    image: docker.elastic.co/kibana/kibana:8.4.0-a6aa9f3b-SNAPSHOT
+    image: docker.elastic.co/kibana/kibana:8.4.0-86cc80f3-SNAPSHOT
     environment:
       - "ELASTICSEARCH_USERNAME=kibana_system_user"
       - "ELASTICSEARCH_PASSWORD=testing"

From e194fb3bfd7bab75251bfb7bd52cd05704d9e2fb Mon Sep 17 00:00:00 2001
From: Mariana Dima
Date: Thu, 30 Jun 2022 12:49:46 +0200
Subject: [PATCH 026/180] Update elastic-agent-libs version: includes restriction on default VerificationMode to `full` (#521)

* update version

* mage fmt update

* update dependency

* update changelog
---
 CHANGELOG.next.asciidoc | 1 +
 NOTICE.txt              | 8 ++++----
 go.mod                  | 4 ++--
 go.sum                  | 7 ++++---
 4 files changed, 11 insertions(+), 9 deletions(-)

diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc
index 00f91675544..2085c1976a4 100644
--- a/CHANGELOG.next.asciidoc
+++ b/CHANGELOG.next.asciidoc
@@ -105,6 +105,7 @@
 - Update API calls that the agent makes to Kibana when running the container command. {pull}253[253]
 - diagnostics collect log names are fixed on Windows machines, command will ignore failures. AgentID is included in diagnostics(and diagnostics collect) output. {issue}81[81] {issue}92[92] {issue}190[190] {pull}262[262]
 - Collects stdout and stderr of applications run as a process and logs them. {issue}[88]
+- Remove VerificationMode option to empty string. Default value is `full`. {issue}[184]
 - diagnostics collect file mod times are set. {pull}570[570]
 
 ==== New features
diff --git a/NOTICE.txt b/NOTICE.txt
index e80beb85357..8b5c887cbe4 100644
--- a/NOTICE.txt
+++ b/NOTICE.txt
@@ -1061,11 +1061,11 @@ SOFTWARE
 
 --------------------------------------------------------------------------------
 Dependency : github.com/elastic/elastic-agent-libs
-Version: v0.2.3
+Version: v0.2.6
 Licence type (autodetected): Apache-2.0
 --------------------------------------------------------------------------------
 
-Contents of probable licence file $GOMODCACHE/github.com/elastic/elastic-agent-libs@v0.2.3/LICENSE:
+Contents of probable licence file $GOMODCACHE/github.com/elastic/elastic-agent-libs@v0.2.6/LICENSE:
 
 Apache License
 Version 2.0, January 2004
@@ -16187,11 +16187,11 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 --------------------------------------------------------------------------------
 Dependency : gopkg.in/yaml.v3
-Version: v3.0.0-20210107192922-496545a6307b
+Version: v3.0.1
 Licence type (autodetected): MIT
 --------------------------------------------------------------------------------
 
-Contents of probable licence file $GOMODCACHE/gopkg.in/yaml.v3@v3.0.0-20210107192922-496545a6307b/LICENSE:
+Contents of probable licence file $GOMODCACHE/gopkg.in/yaml.v3@v3.0.1/LICENSE:
 
 This project is covered by two different licenses: MIT and Apache.
diff --git a/go.mod b/go.mod
index 5bc60abc07b..95044f69abc 100644
--- a/go.mod
+++ b/go.mod
@@ -13,7 +13,7 @@ require (
 	github.com/elastic/e2e-testing v1.99.2-0.20220117192005-d3365c99b9c4
 	github.com/elastic/elastic-agent-autodiscover v0.0.0-20220404145827-89887023c1ab
 	github.com/elastic/elastic-agent-client/v7 v7.0.0-20210727140539-f0905d9377f6
-	github.com/elastic/elastic-agent-libs v0.2.3
+	github.com/elastic/elastic-agent-libs v0.2.6
 	github.com/elastic/elastic-agent-system-metrics v0.3.0
 	github.com/elastic/go-licenser v0.4.0
 	github.com/elastic/go-sysinfo v1.7.1
@@ -132,7 +132,7 @@ require (
 	google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa // indirect
 	google.golang.org/grpc/examples v0.0.0-20220304170021-431ea809a767 // indirect
 	gopkg.in/inf.v0 v0.9.1 // indirect
-	gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
+	gopkg.in/yaml.v3 v3.0.1 // indirect
 	howett.net/plist v1.0.0 // indirect
 	k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65 // indirect
 	k8s.io/utils v0.0.0-20211116205334-6203023598ed // indirect
diff --git a/go.sum b/go.sum
index 7198e357fbf..5026651a90b 100644
--- a/go.sum
+++ b/go.sum
@@ -387,8 +387,8 @@ github.com/elastic/elastic-agent-client/v7 v7.0.0-20210727140539-f0905d9377f6 h1
 github.com/elastic/elastic-agent-client/v7 v7.0.0-20210727140539-f0905d9377f6/go.mod h1:uh/Gj9a0XEbYoM4NYz4LvaBVARz3QXLmlNjsrKY9fTc=
 github.com/elastic/elastic-agent-libs v0.0.0-20220303160015-5b4e674da3dd/go.mod h1://82M1l73IHx0wDbS2Tzkq6Fx9fkmytS1KgkIyzvNTM=
 github.com/elastic/elastic-agent-libs v0.2.2/go.mod h1:1xDLBhIqBIjhJ7lr2s+xRFFkQHpitSp8q2zzv1Dqg+s=
-github.com/elastic/elastic-agent-libs v0.2.3 h1:GY8M0fxOs/GBY2nIB+JOB91aoD72S87iEcm2qVGFUqI=
-github.com/elastic/elastic-agent-libs v0.2.3/go.mod h1:1xDLBhIqBIjhJ7lr2s+xRFFkQHpitSp8q2zzv1Dqg+s=
+github.com/elastic/elastic-agent-libs v0.2.6 h1:DpcUcCVYZ7lNtHLUlyT1u/GtGAh49wpL15DTH7+8O5o=
+github.com/elastic/elastic-agent-libs v0.2.6/go.mod h1:chO3rtcLyGlKi9S0iGVZhYCzDfdDsAQYBc+ui588AFE=
 github.com/elastic/elastic-agent-system-metrics v0.3.0 h1:W8L0E8lWJmdguH+oIR7OzuFgopvw8ucZAE9w6iqVlpE=
 github.com/elastic/elastic-agent-system-metrics v0.3.0/go.mod h1:RIYhJOS7mUeyIthfOSqmmbEILYSzaDWLi5zQ70bQo+o=
 github.com/elastic/elastic-package v0.32.1/go.mod h1:l1fEnF52XRBL6a5h6uAemtdViz2bjtjUtgdQcuRhEAY=
@@ -1884,8 +1884,9 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
 gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
 gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
 gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=
 gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
 gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
 gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
 gotest.tools/gotestsum v1.7.0 h1:RwpqwwFKBAa2h+F6pMEGpE707Edld0etUD3GhqqhDNc=

From ed9d75e9a3bc2d3473c61e969c597d3dfd24dca9 Mon Sep 17 00:00:00 2001
From: Anderson Queiroz
Date: Thu, 30 Jun 2022 13:38:19 +0200
Subject: [PATCH 027/180] redact sensitive information in diagnostics collect command (#566)

---
 CHANGELOG.next.asciidoc                    |  1 +
 internal/pkg/agent/cmd/diagnostics.go      | 84 ++++++++++++++++++++--
 internal/pkg/agent/cmd/diagnostics_test.go | 80 ++++++++++++++++++++-
 3 files changed, 155 insertions(+), 10 deletions(-)

diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc
index 2085c1976a4..602b0293cc6 100644
--- a/CHANGELOG.next.asciidoc
+++ b/CHANGELOG.next.asciidoc
@@ -190,3 +190,4 @@
 - Support scheduled actions and cancellation of pending actions. {issue}393[393] {pull}419[419]
 - Add `@metadata.input_id` and `@metadata.stream_id` when applying the inject stream processor {pull}527[527]
 - Add liveness endpoint, allow fleet-gateway component to report degraded state, add update time and messages to status output. {issue}390[390] {pull}569[569]
+- Redact sensitive information on diagnostics collect command. {issue}[241] {pull}[566]
diff --git a/internal/pkg/agent/cmd/diagnostics.go b/internal/pkg/agent/cmd/diagnostics.go
index 3f68689930b..0fbba44be71 100644
--- a/internal/pkg/agent/cmd/diagnostics.go
+++ b/internal/pkg/agent/cmd/diagnostics.go
@@ -14,6 +14,7 @@ import (
 	"io/fs"
 	"os"
 	"path/filepath"
+	"reflect"
 	"runtime"
 	"strings"
 	"text/tabwriter"
@@ -34,10 +35,17 @@ import (
 	"github.com/elastic/elastic-agent/internal/pkg/config/operations"
 )
 
+const (
+	HUMAN    = "human"
+	JSON     = "json"
+	YAML     = "yaml"
+	REDACTED = "<REDACTED>"
+)
+
 var diagOutputs = map[string]outputter{
-	"human": humanDiagnosticsOutput,
-	"json":  jsonOutput,
-	"yaml":  yamlOutput,
+	HUMAN: humanDiagnosticsOutput,
+	JSON:  jsonOutput,
+	YAML:  yamlOutput,
 }
 
 // DiagnosticsInfo a struct to track all information related to diagnostics for the agent.
@@ -83,6 +91,7 @@ func newDiagnosticsCommand(s []string, streams *cli.IOStreams) *cobra.Command {
 }
 
 func newDiagnosticsCollectCommandWithArgs(_ []string, streams *cli.IOStreams) *cobra.Command {
+
 	cmd := &cobra.Command{
 		Use:   "collect",
 		Short: "Collect diagnostics information from the elastic-agent and write it to a zip archive.",
@@ -115,7 +124,7 @@ func newDiagnosticsCollectCommandWithArgs(_ []string, streams *cli.IOStreams) *c
 	}
 
 	cmd.Flags().StringP("file", "f", "", "name of the output diagnostics zip archive")
-	cmd.Flags().String("output", "yaml", "Output the collected information in either json, or yaml (default: yaml)") // replace output flag with different options
+	cmd.Flags().String("output", YAML, "Output the collected information in either json, or yaml (default: yaml)") // replace output flag with different options
 	cmd.Flags().Bool("pprof", false, "Collect all pprof data from all running applications.")
 	cmd.Flags().Duration("pprof-duration", time.Second*30, "The duration to collect trace and profiling data from the debug/pprof endpoints. (default: 30s)")
 	cmd.Flags().Duration("timeout", time.Second*30, "The timeout for the diagnostics collect command, will be either 30s or 30s+pprof-duration by default. Should be longer then pprof-duration when pprof is enabled as the command needs time to process/archive the response.")
@@ -690,16 +699,77 @@ func saveLogs(name string, logPath string, zw *zip.Writer) error {
 
 // writeFile writes json or yaml data from the interface to the writer.
 func writeFile(w io.Writer, outputFormat string, v interface{}) error {
-	if outputFormat == "json" {
+	redacted, err := redact(v)
+	if err != nil {
+		return err
+	}
+
+	if outputFormat == JSON {
 		je := json.NewEncoder(w)
 		je.SetIndent("", "  ")
-		return je.Encode(v)
+		return je.Encode(redacted)
 	}
+
 	ye := yaml.NewEncoder(w)
-	err := ye.Encode(v)
+	err = ye.Encode(redacted)
 	return closeHandlers(err, ye)
 }
 
+func redact(v interface{}) (map[string]interface{}, error) {
+	redacted := map[string]interface{}{}
+	bs, err := yaml.Marshal(v)
+	if err != nil {
+		return nil, fmt.Errorf("could not marshal data to redact: %w", err)
+	}
+
+	err = yaml.Unmarshal(bs, &redacted)
+	if err != nil {
+		return nil, fmt.Errorf("could not unmarshal data to redact: %w", err)
+	}
+
+	return redactMap(redacted), nil
+}
+
+func toMapStr(v interface{}) map[string]interface{} {
+	mm := map[string]interface{}{}
+	m, ok := v.(map[interface{}]interface{})
+	if !ok {
+		return mm
+	}
+
+	for k, v := range m {
+		mm[k.(string)] = v
+	}
+	return mm
+}
+
+func redactMap(m map[string]interface{}) map[string]interface{} {
+	for k, v := range m {
+		if v != nil && reflect.TypeOf(v).Kind() == reflect.Map {
+			v = redactMap(toMapStr(v))
+		}
+		if redactKey(k) {
+			v = REDACTED
+		}
+		m[k] = v
+	}
+	return m
+}
+
+func redactKey(k string) bool {
+	// "routekey" shouldn't be redacted.
+	// Add any other exceptions here.
+	if k == "routekey" {
+		return false
+	}
+
+	return strings.Contains(k, "certificate") ||
+		strings.Contains(k, "passphrase") ||
+		strings.Contains(k, "password") ||
+		strings.Contains(k, "token") ||
+		strings.Contains(k, "key")
+}
+
 // closeHandlers will close all passed closers attaching any errors to the passed err and returning the result
 func closeHandlers(err error, closers ...io.Closer) error {
 	var mErr *multierror.Error
diff --git a/internal/pkg/agent/cmd/diagnostics_test.go b/internal/pkg/agent/cmd/diagnostics_test.go
index d55f0a06721..f029697d77b 100644
--- a/internal/pkg/agent/cmd/diagnostics_test.go
+++ b/internal/pkg/agent/cmd/diagnostics_test.go
@@ -15,6 +15,7 @@ import (
 	"testing"
 	"time"
 
+	"github.com/elastic/elastic-agent-libs/transport/tlscommon"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 
@@ -30,7 +31,7 @@ var testDiagnostics = DiagnosticsInfo{
 		BuildTime: time.Date(2021, 1, 1, 0, 0, 0, 0, time.UTC),
 		Snapshot:  false,
 	},
-	ProcMeta: []client.ProcMeta{client.ProcMeta{
+	ProcMeta: []client.ProcMeta{{
 		Process:            "filebeat",
 		Name:               "filebeat",
 		Hostname:           "test-host",
@@ -45,7 +46,7 @@ var testDiagnostics = DiagnosticsInfo{
 		BinaryArchitecture: "test-architecture",
 		RouteKey:           "test",
 		ElasticLicensed:    true,
-	}, client.ProcMeta{
+	}, {
 		Process:            "filebeat",
 		Name:               "filebeat_monitoring",
 		Hostname:           "test-host",
@@ -60,7 +61,7 @@ var testDiagnostics = DiagnosticsInfo{
 		BinaryArchitecture: "test-architecture",
 		RouteKey:           "test",
 		ElasticLicensed:    true,
-	}, client.ProcMeta{
+	}, {
 		Name:     "metricbeat",
 		RouteKey: "test",
 		Error:    "failed to get metricbeat data",
@@ -137,3 +138,76 @@ func Test_collectEndpointSecurityLogs_noEndpointSecurity(t *testing.T) {
 	err := collectEndpointSecurityLogs(zw, specs)
 	assert.NoError(t, err, "collectEndpointSecurityLogs should not return an error")
 }
+
+func Test_redact(t *testing.T) {
+	tests := []struct {
+		name         string
+		arg          interface{}
+		wantRedacted []string
+		wantErr      assert.ErrorAssertionFunc
+	}{
+		{
+			name: "tlscommon.Config",
+			arg: tlscommon.Config{
+				Enabled:          nil,
+				VerificationMode: 0,
+				Versions:         nil,
+				CipherSuites:     nil,
+				CAs:              []string{"ca1", "ca2"},
+				Certificate: tlscommon.CertificateConfig{
+					Certificate: "Certificate",
+					Key:         "Key",
+					Passphrase:  "Passphrase",
+				},
+				CurveTypes:           nil,
+				Renegotiation:        0,
+				CASha256:             nil,
+				CATrustedFingerprint: "",
+			},
+			wantRedacted: []string{
+				"certificate", "key", "key_passphrase", "certificate_authorities"},
+		},
+		{
+			name: "some map",
+			arg: map[string]interface{}{
+				"s":          "sss",
+				"some_key":   "hey, a key!",
+				"a_password": "changeme",
+				"my_token":   "a_token",
+				"nested": map[string]string{
+					"4242":            "4242",
+					"4242key":         "4242key",
+					"4242password":    "4242password",
+					"4242certificate": "4242certificate",
+				},
+			},
+			wantRedacted: []string{
+				"some_key", "a_password", "my_token", "4242key", "4242password", "4242certificate"},
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			got, err := redact(tt.arg)
+			require.NoError(t, err)
+
+			for k, v := range got {
+				if contains(tt.wantRedacted, k) {
+					assert.Equal(t, v, REDACTED)
+				} else {
+					assert.NotEqual(t, v, REDACTED)
+				}
+			}
+		})
+	}
+}
+
+func contains(list []string, val string) bool {
+	for _, k := range list {
+		if val == k {
+			return true
+		}
+	}
+
+	return false
+}

From 94975cc6f476f528401a30d6ae6396274895d9ac Mon Sep 17 00:00:00 2001
From: ofiriro3
Date: Thu, 30 Jun 2022 16:35:02 +0300
Subject: [PATCH 028/180] Support Cloudbeat regex input type (#638)

* support input type
with regex * Update supported.go * Changing the regex to support backward compatible --- internal/pkg/agent/program/supported.go | 2 +- internal/spec/cloudbeat.yml | 11 +++++------ 2 files changed, 6 insertions(+), 7 deletions(-) diff --git a/internal/pkg/agent/program/supported.go b/internal/pkg/agent/program/supported.go index 4bd218a4ee9..50140f27617 100644 --- a/internal/pkg/agent/program/supported.go +++ b/internal/pkg/agent/program/supported.go @@ -27,7 +27,7 @@ func init() { // internal/spec/metricbeat.yml // internal/spec/osquerybeat.yml // internal/spec/packetbeat.yml - unpacked := packer.MustUnpack("eJzMe1uTo7iW7vv5Gf16zpnNJZ3dTMR+MGRxs5Mq4zISekOSE2wL7E4b2zAx/31C4mLAZFZWdvWe/VARlVjosrQu3/rW4r9+22Sn9WsWsX8cD2vyj+iQ/v/j+vW8fv2PImW//edvODVP6Ps+XgS6Nw88RjLESHzYYrB4dCzzgpdyiaCrIOjMQuhKEUBJqI7+lpFyH4PLPnYM5+QvnaNjuKcQTBKkBCcEJtI8DfIQuEcEFhq1XRktnaOxmcbORjadzSV2UrqFqs5I6jGcLTTXPumrL/J3P3CBH7gvvqTZi3J/fX7SNSc+UCMNHoilFdQKdlCRGbXdQ6g+PzrmceYY000I9dMc1mfaOEeDSTOSBUcEnx/5uvOlvsWqPoGqf4bK9UDUhXjuGNPYsZiEgPToWOiIQCC1z23//HWjH3Cmy9R+nolnxjTGyuQlVLQcpddDJZ/JGatT/vvJseSEPO3bscQypehpH6P0yhBc3J539tY8my/1AgH5TNPgJVKCydd43/5W/dNfEdzx+9iGSlASWUuIxcTYT81ju6ySKcvRpTtGikkanLCKGFRObP39dp7mn5h3o/P7zul0L95BKXuAqieRNEjw9328VqVaJuiAbZ8RpikhuMq9c9sew1awpZZWjMm6XkdaQ53d3kEJtgNGyt6+TkJPF+1ejtQKitvZ9RKBKwtV/0yyO7nfrVvNp8nU1uXqfDfZdO7y5Fgsj9JgS01tj4C5Q9Atv270318WBzWygvzrRj8iMMmoFe9d+1Sv42mz5fT/Ok/TOASTnWMlCZFObL2Md2ulXtOWjo5BGbbMklpsS5QgIam3d4tL7KouQxYr3eLC95BFiplGypdsbkwzbGkZUf2EKHE2W+z/+dv/G3qFnG5OeB2dhk4BBrsI+hsETMlIvRN62sdh75lZoI7BzcHzZi4M7zZmngbHEHpSBJ5PIaDcsPM1kI/O5lK9s1qJd4gSSBRO8whcT11HgVLzSJTVZj7tOZYSQV8mxkRCQL5gy5TQcsJwam6wFey+Aa4MHhuugYGZiz2AIKdGZzz0Cwru1jhihWYRmGTz9MpoGhy/AZ+FWZAN50WK90KsoAi54j9Ju7VkBv4XtlrsAj0wNfu7RJ++br9cnm1puEZCgX+glstCuMgrB8KyeSqfkS2cQY7AJAm5cj3JaQiuJVo6M7g4JCTzDyg1t5TfSRok1H4+9+4mc5m49Mx/4c4Jq4FE7ECCircPwSTj9yuc8GLfcYrjBmtsnuMFuB6dL6aMLCaJ9YzaQIzKEEmhbxCgB7zR1Qj6e8eg1dmMP7J5vB8xeO+FKkyKTK1AgLK1Pa0NsHFE/oRYq8r4bJd93UwzmFbrz4uHV7d2TkQJjgh4EladR254/Izksp85ha4jyy8dgx5w6rP10z4WhlDIvyPLlMJAK6ntshBIufgbeoza0mmt+Am1zBei+gUC5mmeThIMgpJY5hZBKeMBwLGCJFTiOAKTC4WLnMstApM/+T6gwnJkBQ+Nk6E2u3BZi7W7+7J9FRd6giz/BaWMYfFML7DiMaJ63CFxJ3TG6SpGqVY4VqBUQVTsr0RwUTsKLUdLPcWqw+fehdBP2vtbTsTf3Lbmxt1v4t7mhp7gdBEPZSJ8APQuIfBYdX+tU6vHyC/Eds8i6Cha61TvAyHLiRIU1NQSlPmMvC2XJ6xMFARdqblHIEt5cw9EPlH+nFp/1HL3S/670FmYSITbgqkdIyjugPuFstlTE3Cb92o9O1ArOH3d6M15mucSLjtjxdk7AdzWE2rFXV0bC95b50v1e6XHQelY6Ew2ut6xd263WQQeYmEHxjSr7HdxdouH2IeIkYxJEQ/EXE+47Da6FFkml0/ZrmML+UgILOIQPsfUSphj1f5jqechkPn9NUGNyyknyjWhVvCG/fVBirinRSfY3dlinM+KXdZ9V8z7tI/dpy+zDngQe+kBDwsVWJH6tm7c7gADbUfBlXVkmjrWl7i1V0M/kEIviRVsI4AOQgaKltM0KLhfqmUldMZVGrvxeTDGrgB4MnWLh3aPGJivKNASkrlJH+C0utLaU++81h+PjnWbv/7txGWFgaagQBM+rftODWKG833knTZu/ew6re0v9j8GhZYAeUKe/J4joOXtPZguIzA4kHQVhzwuWN4Zp+iACl3Chb7Fiswc29+TVJORElc6+cXMuP9yjCTHhX7EipdgQ08jcGWk2OU/cZYqrphawXUDKuiAraC207dAZQfobe/8QA2ga5upZUNT80hBUHZkwsGk0Hk07T4T679EgGMRWkTQ7wBvKSYqK7nuGPF+63wxd8h+jqlNz45l7pCppREIjvxZ15eSQgDNxq/HDW6KwKjPbwEhVCoZz1O2mxsjzzMvD6FeRpYmjf/OfY53wMrDIKZVv4vYlv7x6NgnrZK3dyZ2c0d6SlLtdAfYq4Spi3NmfO6hP0CZe+Z4o48d+z62srMWG81qvNID9Tc84LZ3N1/212/tvcZEre5xnYfiXVbdVx3zDL0F8o0/qfxsDcwNrvOTNAJUJqkpbGUQMweJwH5GlYThLY9/QY5Ufz8z/N/7Z2/O+ZcSA5EMkJTP+yySgZ9KDAjb53QsMVCuZwoGiUHv2SAx+LcC+Qf6cXA8ANr9M26pxc6YaZXCm1oFhKsMlBvHEavBToDl6X7IPKQRTFqQWSuFUMq3gKRTA22ePWKFcofXH98490C7kFTbIuiVHBTfGA0th4pw1McIeFJlqB7jMggBkhB02oB3B87+pRl7X26DjLpiF0ztgFN0boypt79th2kZcQaN8feDQJv1nyn0OaifGRndI/Dw6FhXhlMqRQY3uNohqdLBeXqIn2vwHFlmuVSCCTfiRpdelpfYVZrk1ysRMItQiYXhfdog1xk97DfZ0B59MNkRi+vhIl4pwZZC90Dt3SxU5F0FGPwLVlhODblEwJNJyqT1QLfRbY
4TsgLFELqBDqGyenSeQvXrUzwLgSecG6zlANUK3H4Vzi7Ika2feSA00usZydolhP6+ul8OQFw14vI0nPN3i21IahbrpWY2Tmsu3d6fq54UQp/NlesZFVpn/9Kfcz53wXVVP0ZgIvPEztloZ2Ivzj64JkT1D2Ghmbd3tJIK36EdsULO3XPONhP+bMN1gAp701RS7mNn9/wIzeuCpFpGUvPkfKlABjSv7X7F/5s1zCvh90atgECLn/1KRtdJvT0C3quQn+on2Lo81gkMC2VNgKFh0uKkHblAj4VqIECGU4+rmdc2qDk8iKcsXS+d27ONdOK65bQBcbohqs8DUtE8oxY7IaDJXBeey+mMWFop7CzzJA7y6jt+QMDjCesRfR8kRIY7rmfNPqwqJrTB1nDbubv7mi/l9k7qcSW1fEYyp/PMOc1hcEGqmyBrNXjuMqJoMkk9RoqODN6QY3/85DGC002TCEZAZtyev26myvPTdEZsl0E1yCMw4Tp1xE/72Xyps7UVbCsgumrAkND9r5vppqsH5GabzRoJSWkfZBqujNNWPzZdMDm8x3H5jOz7hwTPDUhBdeCX3/PtdZLQTRKHwJnLp9WLqZAX13MJQfdlOPYNMued5EcfxqITT4gpWIkzYWBehvbUY91tV8ZWb68/TIgEmZEGRS/h2N7LqmuTfab/LvbcscpiHxAdiMLOOB4Hi9Wc/gAc/vpY1Y9JBw50S2q7Cbcj7itxqkkOX091ZZz5BwxWeQjdbWRL8bfvUuwqZoG/h5JbVOu79qmgYCJ0dJ6iBAN2XMN6rGCxk4QapDqP4f9OsiAXvmk5OYXgcCZZPbYk2Ww5nd3FyJcNW49gVp/7LFAlAHVcFP62whmHyg43Ou5VpDKPUTu4zFN2xB/Ang2+5Qkfsa4JtVZjBO6g6iUnOOUJliySvM54iWTBPb61tAzxWFVMjggihp/kHQKujIofVtOs5epqfoCUvsnFmNSJDckbMoYUI3IQ1TizoBZLwwpbCoxMCq0r4wJBHtddCSocQ9Q6b2rbyDJzxHFGbbs/8leiOpMeeELPE+8dgnHjd4cY+W8nLudL/RTC6cDvD5PgOz9TVR4V84hNTcJyRVz2/E2VOA/IyR9WG5tcga0tjxF7wXOwTUuiFuJ+2/urdLxHOm/gYrDHseoo98fQZ/w+esTF5bP7b2W9QQCdSbp6lyQYEDg9wrY966WOzbZfhCKf4mfwX8S+b8TQ4L68IRn8d5LEIlaFYFKK3LZPODb76udMXHbAvJBeFbN+zv1ChcvE/7vvcH2gKc+Ha4wp7IP0K7hNgUH1JGSxXJzZ8g94sI7wOdniTC3vwvcX9vSuO0+wI0/7mAL/MthLhcMtbRspAccUO6x4r938s5pHYP8CAf9AZK3Elqbyc33d6NWzy/3556o3ITxnKPeCeOvI4SPEokyU4OWOFPobydX3ii6fXr9T9PnLpGivCDfM16U4hLScgypfp4ophUrc/w0+dzCO+Uihy+ag0o/IDm64vc75UarJOPWLddcHKRMpBCy/6YcUN7pze59uiSFfKHBLCm7jItuXiKl18Wcz757a/iXKvDO++eQ/sRoUODWP6DbuNQToNbztp2M3UoyhKXXmzBH0U66nt339UXrboCIfu9iuo7/zZd/O+N9r6PXW4bZ204dgR25z8Vzh4fYbPWCeH9/mrgj22if+dZK85eze1ImmqNyN0RWBLWJ1yv1mk3OEinZpCy1NMcgaI31F/l3xbj+Fu2+Y74Ok70+Ttp018kZmL8td/G0zvTgc1xj6PoTeHMHd3rVPNb/kaxxbo4avUD0mMLNBjo5BK/0oiMBzrnJKUHpKRJcH922CG/H2brEbwb5svT6Nd3j5VQ4Sr5r8peZt69zk1OY1N+6W5+IV32bIJ6z4zGHSoAus6o5qu6zigyjK3DhQdFi3uEtqCge9bpy7fPMHeeCobxov2HRzr1Mnb/s16w9t4Z09NDbxrj+vY0Krn/U+m71ARRQeRjlX0XlX6Cm2AkaNSdOVlzdz3Zpw2twuhm1Rt8UaN+6z4qgbfyCKW3hUPsI2cKsHWdNtN7lghWO7nShajazV4OP82WjHNuu2vDSygjSEwZHa41zzPXd8t489Vj1pwPHeyUl0vI13dOWN3sxTLv8qPkBVP5Ns8aO1S6Jc7rr+Gh8x307zoU52+YqOrKr9tvvTuzbWXf+O/x69r7vOvzpeV0XGu3eruZMzUVu96OQ7b2PdN/f5Dp7p2gGFHmtzxOmnOhFHcdlfmmM5EfkIUfUkVFafOtebzUefO+MtL4k/1eU40K3px2ojjZ02ObgVJMgKhB8SsX20dlLZxyA2/uJOyJGYmKyj17HuxqUVJCTrFzGj3rNOIPxoAfMTZNDPtEt/qDvxLkhzcOKXcyXhIEP6PBnzfuHyLqDKmugEhKrLkBI8dFugxwuCLk9M10TljjZh4mKLP/LZZUSRBwD1PVL5fWDbMczmbm6O9r7Q2XRRZQEbGsEYyP07CpN9x9wBr/+KgqWQDa2SoqUAq4Joo5ZZRgY5GPE/74uW6fr0uiEjFvgdBBJJ2bbWyPqjg7qRX6mp2vEPC5rWgAO2pB9TrA2Nm/kMQ/0oSos/bj/4yY8Xrmek8PSL5FhQIBcNWcGGAjKcNwtl7YKgu+Xzflv6v39fBavVjj19gJo9IegXEfDqHpGm72rC4bmgGMfOZbzpEfqyJmnAb7KgpnbGTGtSlZdQSRKcUm6dleZnLZ05DvE/0g9b331N07VW1HgWqHZoycZyPtrCUKUGN11Sn38VnfgO1XtHHQ7gT53uBlqCresLtbQXbLGSPu3v+h47a3bgw93Zc6xoPWoNwWSLoC4JSJ7VkAVWPfkRWFS96o2tGEKfOq0ibskjx2Cv49R083FMv1/5s+e43WEapLc0PHigtptwnRByMjVBxd7KkgPYqbbnnfX3OHg++IBntPT5VuQaKXeSdPXomA/5rNAa2yzd6fu96f/bJdCBvt/pKZc7zjzhJ6FKD9RKXkgaZAgml36Jor6D+/7dgts43MT/WD1dRZT/tnl4nS3vZVTNw9eIHx3D7yKCClJXsaI7d9NneEeN9/oDb5TdKQQnBhWzIKk5GdXj1k8MYHilK+2ef7K3t/PeYvZjWP8WrfCRd/rfLPwS6vjzc4ykSB84gx2UKNA6KcYv7T/txU3x7lC3WvqxFxPzsdjQlkJ7tuzc06hZxyYHuKaRy7t0ZE8GCQuBz/ic1flHPyYb4IN/gx7S/fHPfP1ajME/1btSEBTrflX+TFRTRtCdDCvzP1GV/3no9zMdpB+qxruf7zKtP+fCQ/m8232nlQQGjGS72ee6zJr3mVCdD3eX2f4Bg+BM4eLRefpyMToM8BvJYpdhbz7NGsC26xnVkO1j3a06h6uiUwFbTMgwAoLd464j5aH97e6Av6861t7fB6shH2xlH3TXdlzIuEv4JaxKqGjiU6wQsJzaz4+Dzhwx7mU5Tb4tp9lzGSvuZcQVHCKyW49RMSvL3EZKIPUSQZsHkBNrmvbbRLAgJ79KPX+QBPIxd2Pf7SUXNZZCNqtay
/t9Of2xbyZ/2Vsfa5L+mT9Px/xF2qP/gdeblMclBN4rGuH+xrW5uzfng9z9kD/+V2n3T309Pfvtv//P/wQAAP//cJQCCQ==") + unpacked := packer.MustUnpack("eJzce1mToziX9v33M/r2m5lmSWc3E/FeGLLY7CTLuBIJ3SHJCdgCu9PGNp6Y/z4hsRgwuVR29ztvzEVGVGGh5egsz3nO4b9+SfPD6jWP2K/73Yr8Gu2yf9+vXo+r1/8oM/bLf/6CM/OAfmzjRaB788BjJEeMxLs1Bot7xzJPeClfEHQVBJ1ZCF0pAigJ1dHfcnLZxuC0jR3DOfhLZ+8Y7iEEkwQpwQGBiTTPgiIE7h6BhUZtV0ZLZ2+k09hJZdNJT7GT0TVUdUYyj+F8obn2QX/+Jv/wAxf4gfviS5q9uGzPjw+65sQ7amTBHbG0klrBBioyo7a7C9XHe8fczxxjmoZQP8xhfabU2RtMmpE82CP4eM/XnS/1NVb1CVT9I1TOO6IuxHPHmMaOxSQEpHvHQnsEAql9bvvHp1Tf4VyXqf04E8+MaYyVyUuoaAXKzrtKPpMjVqf894NjyQl52LZjiWVK0cM2RtmZIbi4Pu/srXk2X+olAvKRZsFLpASTp3jb/lb96a8Ibvh9rEMluBBZS4jFxNgvzWO7rJIpK9CpO0aKSRYcsIoYVA5s9eN6nuZPzJvq/L4LOt2Kd1DG7qDqSSQLEvxjG69UqZYJ2mHbZ4RpSgjOcu/ctsewFayppZVjsq7XkVZQZ9d3UILtgJFLb18HoaeLdi97agXl9ez6BYEzC1X/SPIbud+sW82nydTW5ep8V9l07vLgWKyIsmBNTW2LgLlB0L08pfpvL4udGllB8ZTqewQmObXirWsf6nU8bbac/n/nYRqHYLJxrCQh0oGtlvFmpdRr2tLeMSjDlnmhFlsTJUhI5m3d8hS7qsuQxS5ueeJ7yCPFzCLlWz43pjm2tJyofkKUOJ8ttv/45d+GXqGg6QGvosPQKcBgE0E/RcCUjMw7oIdtHPaemSXqGNwcPKZzYXjXMfMs2IfQkyLweAgB5YZdrIC8d9JT9c7zs3iHKIFE4bSIwPnQdRQoM/dEeU7n055juSDoy8SYSAjIJ2yZElpOGM7MFFvB5jvgyuCx4RoYmIXYAwgKanTGQ7+k4GaNPVZoHoFJPs/OjGbB/jvwWZgH+XBepHgvxArKkCv+g7RZSWbgf2PPi02gB6Zm/5Dow9P62+nRloZrJBT4O2q5LISLonIgLJ9n8hHZwhkUCEySkCvXg5yF4HxBS2cGF7uE5P4OZeaa8jvJgoTaj8fe3eQuE5ee+y/cOWE1kIgdSFDxtiGY5Px+hRNebDtOcdxgjfQxXoDz3vlmyshikljPqA3EqAyRlHqKAN3hVFcj6G8dg1ZnM37P5/F2xOC9F6owKTK1EgHKVva0NsDGEfkTYj1Xxme77Cmd5jCr1p+Xd69u7ZyIEuwR8CSsOvfc8PgZyWk7c0pdR5Z/cQy6w5nPVg/bWBhCKf+GLFMKA+1CbZeFQCrE/6HHqC0dVoqfUMt8IapfImAe5tkkwSC4EMtcIyjlPAA4VpCEShxHYHKicFFwuUVg8gffB1RYgazgrnEy1GYnLmuxdndftq/iUk+Q5b+gjDEsnuklVjxGVI87JO6Ejjh7jlGmlY4VKFUQFfu7ILioHYVWoKWeYdXhc29C6Cft/S0n4v/ctubGzW/i3uaGnuBsEQ9lInwA9E4h8Fh1f61Tq8fIL8R2jyLoKFrrVG8DISuIEpTU1BKU+4y8LZcHrEwUBF2puUcgS0VzD0Q+UP6cWr/Xcvcv/HehszCRCLcFU9tHUNwB9wuXZk9NwG3eq/VsR63g8JTqzXma5xK+dMaKs3cCuK0n1Iq7ujYWvNfOt+r3So+Di2OhI0l1vWPv3G7zCNzFwg6MaV7Z7+LolnexDxEjOZMiHoi5nnDZpboUWSaXz6VdxxbykRBYxCF8jKmVMMeq/cdSL0Ig8/trghqXU0GUc0Kt4A3764MUcU+LTrC7scW4mJWbvPuumPdhG7sP32Yd8CD20gMeFiqxIvVt3bjeAQbahoIz68g0c6xvcWuvhr4jpX4hVrCOANoJGShaQbOg5H6plpXQGVdp7MbnwRi7AuDJ1C3v2j1iYL6iQEtI7iZ9gNPqSmtPvfNav9871nX++rcDlxUGmoICTfi07js1iBnO95l32rj1s+u0tr/YfgwKLQHyhDz5PUdAK9p7MF1GYLAj2XMc8rhgeUecoR0qdQmX+horMnNsf0syTUZKXOnkNzPn/ssxkgKX+h4rXoINPYvAmZFyU/zEWaq4Ymol1w2ooB22gtpO3wKVHaC3vvEDNYCubaaWDc3MPQXBpSMTDiaFzqNp95lY/yUCHIvQMoJ+B3hLMVHZheuOEW/Xzjdzg+zHmNr06FjmBplaFoFgz591fSkpBdBs/Hrc4KYIjPr8FhBCpZLxPGObuTHyPPeKEOqXyNKk8d+5z/F2WLkbxLTqdxHbst/vHfugVfL2jsRu7kjPSKYdbgB7lTB1cc6Mzz30Byh3jxxv9LFj38dWdtZio1mNV3qg/ooH3Pbu5sv++q2915io1T2u81C8y6r7qmOeobdAvvEnlZ+tgbnBdX6SRYDKJDOFrQxi5iAR2M6okjC85vEvKJDqb2eG/1v/7M05/1RiIJIBkvF5H0Uy8FOJAWHbgo4lBsr5SMEgMeg9GyQG/1Igf0c/D44HQLt7xngnHFCdcQpHsGrBkVQZSVpnktmOOx/uJDYIxp1MVwS+e8c6H5HKFbo/vgE/UO0Aw/X2HdZhHGw5NUjnmSdWKHeWAohi69S7ywbUXEGLy5DCCsemO2rFsat4e6wGmyoBIEW9X+7EJQTdl8ZpQpWPT15IFuQIJqdOxn0L7rnRZs/3jkFXXfnO023+ESMwlFflKIWcMw4gquSlu0dnlGW4ZQ36jufGYcT/lEw+dpVDgrJDUv2b64jHgc/WLTezG2Nd5XS3TfOhrfpgsiGWtsP5In5WgjWF7o7am1moyJsKTPgnrLCCGvIFAU8mGZNWA71H1zkOyAoUIxes2i5Unu+dh1B9eohnIfCE4+NBQtiSWunQk3CEQYFs/ciDpJGdj0jWTiH0t5XcOThx1Qjc3TuGc/xhsZRkZrlaamYjn7l0fX8uZOCzOZdHqXX2L/0x53OXDp9zH4GJzJM+J9WOxF4cfXBOiOrvwlIzr+9oFyr8irbHCjl2zzlLJ/xZyvWFiqROU8llGzubx3tonhck03KSmQfnWwVAoHlu9yv+3axhngn3NdQKCOR2Lp/J6DqZt0XAexXyU/0EW6f7OrlhoawJoDRMaJysIxfosVANBABx6nE1K9sGPIcH+Ixlq6VzfZZKBx4InDZYTlOi+jxYlc0zarEDAprMdeHxMp0RS7tQk+/fkzgArO/4DgGPJ7N79GOQLBnuuJ41+7CqeNEGYsNt5+7ua76U2zupx12o5TOSO51nzmEOgxNS3QRZz4PnLiOKJpPMY6TsyOANOfbHT+4jOE2bJDECMuP+4Cmd
Ko8P0xmxXQbVoIjAhOvUHj9sZ/OlzlZWsK5A6nMDlITuP6XTtKsH5GqbzRoJyWgfgBqujLNWP9Iu0Bze47h8Rvb9IflzBVlQHbCs7zG1dQLRTSCHoJrLp9WLqZDXTSy5BXbOTfI9nhjpQ2b5wJNlCp7FmTAwT0N76jHytitjq7fXD5MlEXuzoOwlI+tbWXVtsl8FkAbJyS3jLPYB0Y4o7IjjcSBZzekP4tCZ4YxKkcHjUC0/Vdo5D3fxY00CRZZ5WSrBhM/RxLqXJY89DYnrXRAwy1CJRwDkjoNgjn8SbkfcV+JMkxy+nurKOPd3GDwXIXTXkS3F339IsauYJf4RSm5Zre/ah5KCidDReYYSDNh+BeuxIi4mHHdU5zH830geFMI3LSeHEOyOJK/HXkg+W05vY+RLylYjeNbnPgtUyUEdF4W/FThnuqvsMNVxr1qVe4zawWmesT3+BC5tsC9PBol1Tqj1PEbuDipicoIznnzJIgHsjJdIHtxiX0vLOV5D5WSPIGL4Qd4g4Mqo/LDSZi2fz+YnCOurXIxJnfSQoiFqSDkiB1GpM0tqsYxjhKdUF/iOlFpXxiWCPK67ElQ4hqh13tTWkWUWiOOM2nY/8ldv4+0bDPy3k5rzpX4I4XTg94cJ8o2fqaqSirnHpiZhuSI1e/6mSqoHxOWHlcgGg7OV5TFiL3h+lrYEaynut72/Ssd7hHQKF4M9jlVOuT+GPuP30SM1Tl/dfyvrFAF0JNnzuwTCgNzpkbntWU91bLb9MhQEBz+D/yL2fSWNBvflDYniv5NAFrEqBJOLyHv7ZGSzr34FlMsOmCfSq3DWz7lfqHCZ+Hf3Ha4PNOO5co0xhX2QfnW3KT6onoQsVogzW/4OD9YRPidfHKnlnfj+wp7edecJNuRhG1PgnwZ7qXC4pa0jJeCYYoMV7xVBZzCPwP4lAv6OyNoFW5rKz/WU6tWz0+3556o3ITxnuGwFKdeRw2dIR5kowcsNYfQ3Eq/vFWS+vH6nIPSnCdNegW5YfZfiENLLHFR8AVVMKVTi/m/wsYNxzHsKXTYHlX5EdnDF7VY1B8o0GWd+uer6IGUihYAVV/2Q4kZ3ru/TNTHkEwXuhYLruMj2JWJqXfzZzLultn+Kcu+Irz75D6wGJc7MPbqOew0Beg2v++nYjRRjaEqdOQsE/Yzr6XVfv1+8dVARk11s19Hf+bJvZ/z/K+j11uG2dtWHYEOuc/Fc4e76G91hnh9f567I99on/nkCveXz3tSJpuD8MWcjxaGindoiTFMossYIYZF/15zcz+DuK+b7JCH804RuZ42ikdnLchN/T6cnh+MaQ9+G0JsjuNm69uFIoc/HaBxbo4avUD0mMLNB9o5BK/0oicBzP8UPvbDV6jDe/eVXOUj83OQvNadb5yaHNq+58ro8F686fQz5gBWfOUwadIhVnVNtB9Zn+NK436lzk29+kAe+xS+OFHO6udehk7f9NesPbeGdPTQ28a4/r2PClX+s9tnsBSqiKPEWtxmDUs+wFTBqTJqOvaKZ69qg0+Z2MWwLvi3WqHVz0fCpjT8QhS88Kh9hG7jVg7zpxJucsMKx3UYUtEbWavBx8Wi0Y69cr5jHf0FWkIUw2FN7vHPsltO92ccWq5406Pq6kZPohhvv9ioavZlnXP5VfICqfiT54qO1L0Q53XQENj5ivp4WQ53s8hUdWVX7bfend22su37fJ3b+xgpm/T+pKUDevFvNnRyJ2upFJ995G+u+uc938EzXDij0WJsjTr/UpTiKy/7UHMuJyEeIqieh8vylc73ZmPS1M17zkvhLHZAD3ZqO8VPN2KF/uObgVpAgKxB+SMT2nG4RuLvv81GVfQxi419cWxmJickqeh3rfFxaQULyfoEz6j3rBMLPFje/QAb9TCv1pzoXb4I0Byf+Za4kHGRIXydjakAJk7ZoWQOj8YAqa6JLEKouQ0pw122PHm/vdXliuiIqd7QJExdb/l7MTiOKPACo75HK7wPbjmE2d3N1tLdty02HVR6wzxQm33LMbzvkj42v75g74HXU6P4aErjtLhCyoVVStBRgVRBt1DIvkUF2RvyP2w6DbHV4TcmIBf4AgUQytq41sv4goW7yV2qqdvyjg6ZtYIct6WOKtaFxc59hqO9FafHj1oSf/LDhfEQKT79IgQUFctKQFaQUkOG8eShrJwTdNZ/3+9L/7cdz8Py8YQ+foGYPCPplBLy6f6TpyZpweC4oxrFzGW96hL6sSRbwmyypqR0x05pU5SVUkgRnlFtnpfl5S2e+0RLxiV7Z+u5rmu5z7RAW91TizPuooplbqBgCJLUUQdty0X4w8lfRie9QvTfU4QD+1OluoCXYOr9QS3vBFrvQh+1NT2RnzQ58uDl7gRWtR60hmKwR1CUByfMassCqXz8Ci6qPvbEVQ+jTiWTaGkHvwj0RjxyDvY5T082HM/1e5q+e43qHWZBd0/DgjtpuwnVCyMnUBBV7LUsOYKfannfW3+Pg+eDjntHS51uRa6TcKdpazLtiVmqNbV7c6ft96//bJdCBvt/oKZc7zj3hJ99u8+ncwW1vb8ltHKbxr88PZxHlv6d3r7PlrYyqeUTr0b1j+F1EUEHqKlZ05256EG+o8V7v4JWyO4TgwKBiliQzJ6N63PqJAQyvdKXd80/2/XbeW8w+hvVv0Qqfeaf/PcNfQh1/fY6RFOkTZ7CDCwq0Torxl/am9uKmeHeoWy392IuJxVhsaEuhPVt2bmnUvGOTA1zTyOVdOrIng4SFwGd8zur8o+1pA3zwL9Bfut3/UaxeyzH4p3pnCoJy1a/KH4lqygi6k2Fl/ieq8j8P/X6mu/RT1Xj36x2o9adeeCifd7vvtAuBASP5Zva1LrPmfVZ1iX62u8z2dxgERwoX987Dt1O3Y/aNZLHLsDefbQ1gW9Mp21H/xr0EWgeytCbH4aroVMAWEzKMgGD3uOvIeGh/uzvg76uOtff3yWrIJ9vcB9+s/jM6VjezUNHEZ1ohYAW1H+8HnTli3MtymnxfTvPHS6y4pxFXsIvIZjVGxTxb5jpSAqmXCNo8gBxY09DfJoIlOfhV6vlBEsjH3Ix9t89c1FhK2axqLe/35fTHvpn85W99yEn6Z/46HfMnaY/+x19vUh6nEHivaIT7+z/Wj/3Rl9WzX/77//1PAAAA//9rUQ6t") SupportedMap = make(map[string]Spec) for f, v := range unpacked { diff --git a/internal/spec/cloudbeat.yml b/internal/spec/cloudbeat.yml index 9cfda42344d..b22ed7c328d 100644 --- a/internal/spec/cloudbeat.yml +++ b/internal/spec/cloudbeat.yml @@ -7,8 +7,6 @@ args: [ ] restart_on_output_change: true artifact: cloudbeat -action_input_types: - - cloudbeat rules: - fix_stream: 
{}
@@ -19,11 +17,12 @@ rules:
-  - filter_values:
-      selector: inputs
+
+# All Cloudbeat input types begin with 'cloudbeat'.
+  - filter_values_with_regexp:
       key: type
-      values:
-        - cloudbeat
+      re: '^cloudbeat.*'
+      selector: inputs
 
   - inject_agent_info: {}

From 121a4ad3ff915d6c9700919e1a1e7f188b03f3e1 Mon Sep 17 00:00:00 2001
From: Pier-Hugues Pellerin
Date: Thu, 30 Jun 2022 10:22:02 -0400
Subject: [PATCH 029/180] Disable flaky test download test (#641)

---
 internal/pkg/artifact/download/http/downloader_test.go | 1 +
 1 file changed, 1 insertion(+)

diff --git a/internal/pkg/artifact/download/http/downloader_test.go b/internal/pkg/artifact/download/http/downloader_test.go
index 6fa2777c02f..26164df3a49 100644
--- a/internal/pkg/artifact/download/http/downloader_test.go
+++ b/internal/pkg/artifact/download/http/downloader_test.go
@@ -25,6 +25,7 @@ import (
 )
 
 func TestDownloadBodyError(t *testing.T) {
+	t.Skip("Skipping flaky test: https://github.com/elastic/elastic-agent/issues/640")
 	// This tests the scenario where the download encounters a network error
 	// part way through the download, while copying the response body.

From 319ea0a35ebca6f32176b680fbd49fb50e48f552 Mon Sep 17 00:00:00 2001
From: apmmachine <58790750+apmmachine@users.noreply.github.com>
Date: Fri, 1 Jul 2022 01:33:14 -0400
Subject: [PATCH 030/180] [Automation] Update elastic stack version to 8.4.0-3d206b5d for testing (#656)

Co-authored-by: apmmachine
---
 testing/environments/snapshot.yml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/testing/environments/snapshot.yml b/testing/environments/snapshot.yml
index 94af4334d03..3af879be603 100644
--- a/testing/environments/snapshot.yml
+++ b/testing/environments/snapshot.yml
@@ -3,7 +3,7 @@ version: '2.3'
 services:
   elasticsearch:
-    image: docker.elastic.co/elasticsearch/elasticsearch:8.4.0-86cc80f3-SNAPSHOT
+    image: docker.elastic.co/elasticsearch/elasticsearch:8.4.0-3d206b5d-SNAPSHOT
     # When extend is used it merges healthcheck.tests, see:
     # https://github.com/docker/compose/issues/8962
     # healthcheck:
@@ -42,7 +42,7 @@ services:
     - ./docker/logstash/pki:/etc/pki:ro
 
   kibana:
-    image: docker.elastic.co/kibana/kibana:8.4.0-86cc80f3-SNAPSHOT
+    image: docker.elastic.co/kibana/kibana:8.4.0-3d206b5d-SNAPSHOT
     environment:
       - "ELASTICSEARCH_USERNAME=kibana_system_user"
      - "ELASTICSEARCH_PASSWORD=testing"

From fab95b874d8b64344728038055f35e6b61999273 Mon Sep 17 00:00:00 2001
From: apmmachine <58790750+apmmachine@users.noreply.github.com>
Date: Mon, 4 Jul 2022 01:35:23 -0400
Subject: [PATCH 031/180] [Automation] Update elastic stack version to 8.4.0-3ad82aa8 for testing (#661)

Co-authored-by: apmmachine
---
 testing/environments/snapshot.yml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/testing/environments/snapshot.yml b/testing/environments/snapshot.yml
index 3af879be603..184c29a188d 100644
--- a/testing/environments/snapshot.yml
+++ b/testing/environments/snapshot.yml
@@ -3,7 +3,7 @@ version: '2.3'
 services:
   elasticsearch:
-    image: docker.elastic.co/elasticsearch/elasticsearch:8.4.0-3d206b5d-SNAPSHOT
+    image: docker.elastic.co/elasticsearch/elasticsearch:8.4.0-3ad82aa8-SNAPSHOT
     # When extend is used it merges healthcheck.tests, see:
     # https://github.com/docker/compose/issues/8962
     # healthcheck:
@@ -42,7 +42,7 @@ services:
     - ./docker/logstash/pki:/etc/pki:ro
 
   kibana:
-    image: docker.elastic.co/kibana/kibana:8.4.0-3d206b5d-SNAPSHOT
+    image: docker.elastic.co/kibana/kibana:8.4.0-3ad82aa8-SNAPSHOT
     environment:
      - "ELASTICSEARCH_USERNAME=kibana_system_user"
       - "ELASTICSEARCH_PASSWORD=testing"

From acc91fcaca2b6bc3f74aa61fc52154641f0d8716 Mon Sep 17 00:00:00 2001
From: Victor Martinez
Date: Mon, 4 Jul 2022 09:48:09 +0100
Subject: [PATCH 032/180] jjbb: exclude allowed branches, tags and PRs (#658)

cosmetic change in the description and boolean based
---
 .ci/jobs/elastic-agent-mbp.yml | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/.ci/jobs/elastic-agent-mbp.yml b/.ci/jobs/elastic-agent-mbp.yml
index f3772fd3855..8947d15880a 100644
--- a/.ci/jobs/elastic-agent-mbp.yml
+++ b/.ci/jobs/elastic-agent-mbp.yml
@@ -2,7 +2,7 @@
 - job:
     name: "elastic-agent/elastic-agent-mbp"
     display-name: elastic-agent
-    description: "POC to isolate elastic agent from beats"
+    description: "Elastic agent"
     project-type: multibranch
     script-path: .ci/Jenkinsfile
     scm:
@@ -12,6 +12,7 @@
           discover-pr-forks-trust: permission
          discover-pr-origin: merge-current
           discover-tags: true
+          head-filter-regex: '(main|7\.17|8\.\d+|PR-.*|v\d+\.\d+\.\d+)'
           notification-context: 'fleet-ci'
           repo: elastic-agent
           repo-owner: elastic
@@ -39,4 +40,4 @@
             timeout: 100
           timeout: '15'
           use-author: true
-          wipe-workspace: 'True'
+          wipe-workspace: true

From f8ca007e224bc9caa1434cd293bac1e2efddf38b Mon Sep 17 00:00:00 2001
From: Julien Lind
Date: Mon, 4 Jul 2022 11:49:44 +0200
Subject: [PATCH 033/180] Update elastic-agent-project-board.yml (#649)

---
 .github/workflows/elastic-agent-project-board.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/elastic-agent-project-board.yml b/.github/workflows/elastic-agent-project-board.yml
index 1b296620b09..e6add0d093c 100644
--- a/.github/workflows/elastic-agent-project-board.yml
+++ b/.github/workflows/elastic-agent-project-board.yml
@@ -14,7 +14,7 @@ jobs:
         with:
           headers: '{"GraphQL-Features": "projects_next_graphql"}'
           query: |
-            mutation add_to_project($projectid:String!,$contentid:String!) {
+            mutation add_to_project($projectid:[ID!]!,$contentid:ID!) {
              updateIssue(input: {id:$contentid, projectIds:$projectid}) {
                clientMutationId
              }

From 42b1a9654ed82c8e2ecf49a961a270e1440c4b53 Mon Sep 17 00:00:00 2001
From: Victor Martinez
Date: Mon, 4 Jul 2022 12:24:58 +0100
Subject: [PATCH 034/180] ci: fix labels that clashes with the Orka workers (#659)

---
 .ci/Jenkinsfile | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/.ci/Jenkinsfile b/.ci/Jenkinsfile
index 10c6c1a4347..9454f8d2cc6 100644
--- a/.ci/Jenkinsfile
+++ b/.ci/Jenkinsfile
@@ -79,7 +79,7 @@ pipeline {
       axes {
         axis {
          name 'PLATFORM'
-          values 'ubuntu-20.04 && immutable', 'aarch64', 'windows-2016 && windows-immutable', 'windows-2022 && windows-immutable', 'darwin && orka && x86_64'
+          values 'ubuntu-20.04 && immutable', 'aws && aarch64', 'windows-2016 && windows-immutable', 'windows-2022 && windows-immutable', 'darwin && orka && x86_64'
         }
       }
       stages {
@@ -164,7 +164,7 @@
         }
       }
       environment {
-        ARCH = "${PLATFORM.equals('aarch64') ? 'arm64' : 'amd64'}"
+        ARCH = "${PLATFORM.contains('aarch64') ? 'arm64' : 'amd64'}"
         DEV = true
         EXTERNAL = true
       }

From f873f3683bb3cfe3985bf2399ed1373e0a0570aa Mon Sep 17 00:00:00 2001
From: apmmachine <58790750+apmmachine@users.noreply.github.com>
Date: Tue, 5 Jul 2022 01:33:54 -0400
Subject: [PATCH 035/180] [Automation] Update elastic stack version to 8.4.0-03bd6f3f for testing (#668)

Co-authored-by: apmmachine
---
 testing/environments/snapshot.yml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/testing/environments/snapshot.yml b/testing/environments/snapshot.yml
index 184c29a188d..3f14fe74810 100644
--- a/testing/environments/snapshot.yml
+++ b/testing/environments/snapshot.yml
@@ -3,7 +3,7 @@ version: '2.3'
 services:
   elasticsearch:
-    image: docker.elastic.co/elasticsearch/elasticsearch:8.4.0-3ad82aa8-SNAPSHOT
+    image: docker.elastic.co/elasticsearch/elasticsearch:8.4.0-03bd6f3f-SNAPSHOT
     # When extend is used it merges healthcheck.tests, see:
     # https://github.com/docker/compose/issues/8962
     # healthcheck:
@@ -42,7 +42,7 @@ services:
     - ./docker/logstash/pki:/etc/pki:ro
 
   kibana:
-    image: docker.elastic.co/kibana/kibana:8.4.0-3ad82aa8-SNAPSHOT
+    image: docker.elastic.co/kibana/kibana:8.4.0-03bd6f3f-SNAPSHOT
     environment:
       - "ELASTICSEARCH_USERNAME=kibana_system_user"
       - "ELASTICSEARCH_PASSWORD=testing"

From bbd93ff75ccb5680626a45946e28787ef590ca63 Mon Sep 17 00:00:00 2001
From: apmmachine <58790750+apmmachine@users.noreply.github.com>
Date: Wed, 6 Jul 2022 01:39:21 -0400
Subject: [PATCH 036/180] [Automation] Update elastic stack version to 8.4.0-533f1e30 for testing (#675)

Co-authored-by: apmmachine
---
 testing/environments/snapshot.yml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/testing/environments/snapshot.yml b/testing/environments/snapshot.yml
index 3f14fe74810..9c988fba8ed 100644
--- a/testing/environments/snapshot.yml
+++ b/testing/environments/snapshot.yml
@@ -3,7 +3,7 @@ version: '2.3'
 services:
   elasticsearch:
-    image: docker.elastic.co/elasticsearch/elasticsearch:8.4.0-03bd6f3f-SNAPSHOT
+    image: docker.elastic.co/elasticsearch/elasticsearch:8.4.0-533f1e30-SNAPSHOT
     # When extend is used it merges healthcheck.tests, see:
     # https://github.com/docker/compose/issues/8962
     # healthcheck:
@@ -42,7 +42,7 @@ services:
     - ./docker/logstash/pki:/etc/pki:ro
 
   kibana:
-    image: docker.elastic.co/kibana/kibana:8.4.0-03bd6f3f-SNAPSHOT
+    image: docker.elastic.co/kibana/kibana:8.4.0-533f1e30-SNAPSHOT
     environment:
       - "ELASTICSEARCH_USERNAME=kibana_system_user"
       - "ELASTICSEARCH_PASSWORD=testing"

From 6c2d91aca140d6ed6a91a19c426a8676036e40d1 Mon Sep 17 00:00:00 2001
From: Aleksandr Maus
Date: Wed, 6 Jul 2022 15:38:07 -0400
Subject: [PATCH 037/180] Osquerybeat: Fix osquerybeat is not running with logstash output (#674)

---
 internal/pkg/agent/program/supported.go | 2 +-
 internal/spec/osquerybeat.yml           | 3 ++-
 2 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/internal/pkg/agent/program/supported.go b/internal/pkg/agent/program/supported.go
index 50140f27617..fa35f4a4fe3 100644
--- a/internal/pkg/agent/program/supported.go
+++ b/internal/pkg/agent/program/supported.go
@@ -27,7 +27,7 @@ func init() {
 	// internal/spec/metricbeat.yml
 	// internal/spec/osquerybeat.yml
 	// internal/spec/packetbeat.yml
-	unpacked :=
packer.MustUnpack("eJzce1mToziX9v33M/r2m5lmSWc3E/FeGLLY7CTLuBIJ3SHJCdgCu9PGNp6Y/z4hsRgwuVR29ztvzEVGVGGh5egsz3nO4b9+SfPD6jWP2K/73Yr8Gu2yf9+vXo+r1/8oM/bLf/6CM/OAfmzjRaB788BjJEeMxLs1Bot7xzJPeClfEHQVBJ1ZCF0pAigJ1dHfcnLZxuC0jR3DOfhLZ+8Y7iEEkwQpwQGBiTTPgiIE7h6BhUZtV0ZLZ2+k09hJZdNJT7GT0TVUdUYyj+F8obn2QX/+Jv/wAxf4gfviS5q9uGzPjw+65sQ7amTBHbG0klrBBioyo7a7C9XHe8fczxxjmoZQP8xhfabU2RtMmpE82CP4eM/XnS/1NVb1CVT9I1TOO6IuxHPHmMaOxSQEpHvHQnsEAql9bvvHp1Tf4VyXqf04E8+MaYyVyUuoaAXKzrtKPpMjVqf894NjyQl52LZjiWVK0cM2RtmZIbi4Pu/srXk2X+olAvKRZsFLpASTp3jb/lb96a8Ibvh9rEMluBBZS4jFxNgvzWO7rJIpK9CpO0aKSRYcsIoYVA5s9eN6nuZPzJvq/L4LOt2Kd1DG7qDqSSQLEvxjG69UqZYJ2mHbZ4RpSgjOcu/ctsewFayppZVjsq7XkVZQZ9d3UILtgJFLb18HoaeLdi97agXl9ez6BYEzC1X/SPIbud+sW82nydTW5ep8V9l07vLgWKyIsmBNTW2LgLlB0L08pfpvL4udGllB8ZTqewQmObXirWsf6nU8bbac/n/nYRqHYLJxrCQh0oGtlvFmpdRr2tLeMSjDlnmhFlsTJUhI5m3d8hS7qsuQxS5ueeJ7yCPFzCLlWz43pjm2tJyofkKUOJ8ttv/45d+GXqGg6QGvosPQKcBgE0E/RcCUjMw7oIdtHPaemSXqGNwcPKZzYXjXMfMs2IfQkyLweAgB5YZdrIC8d9JT9c7zs3iHKIFE4bSIwPnQdRQoM/dEeU7n055juSDoy8SYSAjIJ2yZElpOGM7MFFvB5jvgyuCx4RoYmIXYAwgKanTGQ7+k4GaNPVZoHoFJPs/OjGbB/jvwWZgH+XBepHgvxArKkCv+g7RZSWbgf2PPi02gB6Zm/5Dow9P62+nRloZrJBT4O2q5LISLonIgLJ9n8hHZwhkUCEySkCvXg5yF4HxBS2cGF7uE5P4OZeaa8jvJgoTaj8fe3eQuE5ee+y/cOWE1kIgdSFDxtiGY5Px+hRNebDtOcdxgjfQxXoDz3vlmyshikljPqA3EqAyRlHqKAN3hVFcj6G8dg1ZnM37P5/F2xOC9F6owKTK1EgHKVva0NsDGEfkTYj1Xxme77Cmd5jCr1p+Xd69u7ZyIEuwR8CSsOvfc8PgZyWk7c0pdR5Z/cQy6w5nPVg/bWBhCKf+GLFMKA+1CbZeFQCrE/6HHqC0dVoqfUMt8IapfImAe5tkkwSC4EMtcIyjlPAA4VpCEShxHYHKicFFwuUVg8gffB1RYgazgrnEy1GYnLmuxdndftq/iUk+Q5b+gjDEsnuklVjxGVI87JO6Ejjh7jlGmlY4VKFUQFfu7ILioHYVWoKWeYdXhc29C6Cft/S0n4v/ctubGzW/i3uaGnuBsEQ9lInwA9E4h8Fh1f61Tq8fIL8R2jyLoKFrrVG8DISuIEpTU1BKU+4y8LZcHrEwUBF2puUcgS0VzD0Q+UP6cWr/Xcvcv/HehszCRCLcFU9tHUNwB9wuXZk9NwG3eq/VsR63g8JTqzXma5xK+dMaKs3cCuK0n1Iq7ujYWvNfOt+r3So+Di2OhI0l1vWPv3G7zCNzFwg6MaV7Z7+LolnexDxEjOZMiHoi5nnDZpboUWSaXz6VdxxbykRBYxCF8jKmVMMeq/cdSL0Ig8/trghqXU0GUc0Kt4A3764MUcU+LTrC7scW4mJWbvPuumPdhG7sP32Yd8CD20gMeFiqxIvVt3bjeAQbahoIz68g0c6xvcWuvhr4jpX4hVrCOANoJGShaQbOg5H6plpXQGVdp7MbnwRi7AuDJ1C3v2j1iYL6iQEtI7iZ9gNPqSmtPvfNav9871nX++rcDlxUGmoICTfi07js1iBnO95l32rj1s+u0tr/YfgwKLQHyhDz5PUdAK9p7MF1GYLAj2XMc8rhgeUecoR0qdQmX+horMnNsf0syTUZKXOnkNzPn/ssxkgKX+h4rXoINPYvAmZFyU/zEWaq4Ymol1w2ooB22gtpO3wKVHaC3vvEDNYCubaaWDc3MPQXBpSMTDiaFzqNp95lY/yUCHIvQMoJ+B3hLMVHZheuOEW/Xzjdzg+zHmNr06FjmBplaFoFgz591fSkpBdBs/Hrc4KYIjPr8FhBCpZLxPGObuTHyPPeKEOqXyNKk8d+5z/F2WLkbxLTqdxHbst/vHfugVfL2jsRu7kjPSKYdbgB7lTB1cc6Mzz30Byh3jxxv9LFj38dWdtZio1mNV3qg/ooH3Pbu5sv++q2915io1T2u81C8y6r7qmOeobdAvvEnlZ+tgbnBdX6SRYDKJDOFrQxi5iAR2M6okjC85vEvKJDqb2eG/1v/7M05/1RiIJIBkvF5H0Uy8FOJAWHbgo4lBsr5SMEgMeg9GyQG/1Igf0c/D44HQLt7xngnHFCdcQpHsGrBkVQZSVpnktmOOx/uJDYIxp1MVwS+e8c6H5HKFbo/vgE/UO0Aw/X2HdZhHGw5NUjnmSdWKHeWAohi69S7ywbUXEGLy5DCCsemO2rFsat4e6wGmyoBIEW9X+7EJQTdl8ZpQpWPT15IFuQIJqdOxn0L7rnRZs/3jkFXXfnO023+ESMwlFflKIWcMw4gquSlu0dnlGW4ZQ36jufGYcT/lEw+dpVDgrJDUv2b64jHgc/WLTezG2Nd5XS3TfOhrfpgsiGWtsP5In5WgjWF7o7am1moyJsKTPgnrLCCGvIFAU8mGZNWA71H1zkOyAoUIxes2i5Unu+dh1B9eohnIfCE4+NBQtiSWunQk3CEQYFs/ciDpJGdj0jWTiH0t5XcOThx1Qjc3TuGc/xhsZRkZrlaamYjn7l0fX8uZOCzOZdHqXX2L/0x53OXDp9zH4GJzJM+J9WOxF4cfXBOiOrvwlIzr+9oFyr8irbHCjl2zzlLJ/xZyvWFiqROU8llGzubx3tonhck03KSmQfnWwVAoHlu9yv+3axhngn3NdQKCOR2Lp/J6DqZt0XAexXyU/0EW6f7OrlhoawJoDRMaJysIxfosVANBABx6nE1K9sGPIcH+Ixlq6VzfZZKBx4InDZYTlOi+jxYlc0zarEDAprMdeHxMp0RS7tQk+/fkzgArO/4DgGPJ7N79GOQLBnuuJ41+7CqeNEGYsNt5+7ua76U2zupx12o5TOSO51nzmEOgxNS3QRZz4PnLiOKJpPMY6TsyOANOfbHT+4jOE2bJDECMuP+4CmdKo8P0xmxXQbVoIjAhOvUHj9sZ/OlzlZWsK5A6nMDlITuP6XTtKsH5GqbzRoJyWgfgBqujLNWP9Iu0Bze47h8Rvb9IflzBVlQHbCs7zG1dQLRTSCH
oJrLp9WLqZDXTSy5BXbOTfI9nhjpQ2b5wJNlCp7FmTAwT0N76jHytitjq7fXD5MlEXuzoOwlI+tbWXVtsl8FkAbJyS3jLPYB0Y4o7IjjcSBZzekP4tCZ4YxKkcHjUC0/Vdo5D3fxY00CRZZ5WSrBhM/RxLqXJY89DYnrXRAwy1CJRwDkjoNgjn8SbkfcV+JMkxy+nurKOPd3GDwXIXTXkS3F339IsauYJf4RSm5Zre/ah5KCidDReYYSDNh+BeuxIi4mHHdU5zH830geFMI3LSeHEOyOJK/HXkg+W05vY+RLylYjeNbnPgtUyUEdF4W/FThnuqvsMNVxr1qVe4zawWmesT3+BC5tsC9PBol1Tqj1PEbuDipicoIznnzJIgHsjJdIHtxiX0vLOV5D5WSPIGL4Qd4g4Mqo/LDSZi2fz+YnCOurXIxJnfSQoiFqSDkiB1GpM0tqsYxjhKdUF/iOlFpXxiWCPK67ElQ4hqh13tTWkWUWiOOM2nY/8ldv4+0bDPy3k5rzpX4I4XTg94cJ8o2fqaqSirnHpiZhuSI1e/6mSqoHxOWHlcgGg7OV5TFiL3h+lrYEaynut72/Ssd7hHQKF4M9jlVOuT+GPuP30SM1Tl/dfyvrFAF0JNnzuwTCgNzpkbntWU91bLb9MhQEBz+D/yL2fSWNBvflDYniv5NAFrEqBJOLyHv7ZGSzr34FlMsOmCfSq3DWz7lfqHCZ+Hf3Ha4PNOO5co0xhX2QfnW3KT6onoQsVogzW/4OD9YRPidfHKnlnfj+wp7edecJNuRhG1PgnwZ7qXC4pa0jJeCYYoMV7xVBZzCPwP4lAv6OyNoFW5rKz/WU6tWz0+3556o3ITxnuGwFKdeRw2dIR5kowcsNYfQ3Eq/vFWS+vH6nIPSnCdNegW5YfZfiENLLHFR8AVVMKVTi/m/wsYNxzHsKXTYHlX5EdnDF7VY1B8o0GWd+uer6IGUihYAVV/2Q4kZ3ru/TNTHkEwXuhYLruMj2JWJqXfzZzLultn+Kcu+Irz75D6wGJc7MPbqOew0Beg2v++nYjRRjaEqdOQsE/Yzr6XVfv1+8dVARk11s19Hf+bJvZ/z/K+j11uG2dtWHYEOuc/Fc4e76G91hnh9f567I99on/nkCveXz3tSJpuD8MWcjxaGindoiTFMossYIYZF/15zcz+DuK+b7JCH804RuZ42ikdnLchN/T6cnh+MaQ9+G0JsjuNm69uFIoc/HaBxbo4avUD0mMLNB9o5BK/0oicBzP8UPvbDV6jDe/eVXOUj83OQvNadb5yaHNq+58ro8F686fQz5gBWfOUwadIhVnVNtB9Zn+NK436lzk29+kAe+xS+OFHO6udehk7f9NesPbeGdPTQ28a4/r2PClX+s9tnsBSqiKPEWtxmDUs+wFTBqTJqOvaKZ69qg0+Z2MWwLvi3WqHVz0fCpjT8QhS88Kh9hG7jVg7zpxJucsMKx3UYUtEbWavBx8Wi0Y69cr5jHf0FWkIUw2FN7vHPsltO92ccWq5406Pq6kZPohhvv9ioavZlnXP5VfICqfiT54qO1L0Q53XQENj5ivp4WQ53s8hUdWVX7bfend22su37fJ3b+xgpm/T+pKUDevFvNnRyJ2upFJ995G+u+uc938EzXDij0WJsjTr/UpTiKy/7UHMuJyEeIqieh8vylc73ZmPS1M17zkvhLHZAD3ZqO8VPN2KF/uObgVpAgKxB+SMT2nG4RuLvv81GVfQxi419cWxmJickqeh3rfFxaQULyfoEz6j3rBMLPFje/QAb9TCv1pzoXb4I0Byf+Za4kHGRIXydjakAJk7ZoWQOj8YAqa6JLEKouQ0pw122PHm/vdXliuiIqd7QJExdb/l7MTiOKPACo75HK7wPbjmE2d3N1tLdty02HVR6wzxQm33LMbzvkj42v75g74HXU6P4aErjtLhCyoVVStBRgVRBt1DIvkUF2RvyP2w6DbHV4TcmIBf4AgUQytq41sv4goW7yV2qqdvyjg6ZtYIct6WOKtaFxc59hqO9FafHj1oSf/LDhfEQKT79IgQUFctKQFaQUkOG8eShrJwTdNZ/3+9L/7cdz8Py8YQ+foGYPCPplBLy6f6TpyZpweC4oxrFzGW96hL6sSRbwmyypqR0x05pU5SVUkgRnlFtnpfl5S2e+0RLxiV7Z+u5rmu5z7RAW91TizPuooplbqBgCJLUUQdty0X4w8lfRie9QvTfU4QD+1OluoCXYOr9QS3vBFrvQh+1NT2RnzQ58uDl7gRWtR60hmKwR1CUByfMassCqXz8Ci6qPvbEVQ+jTiWTaGkHvwj0RjxyDvY5T082HM/1e5q+e43qHWZBd0/DgjtpuwnVCyMnUBBV7LUsOYKfannfW3+Pg+eDjntHS51uRa6TcKdpazLtiVmqNbV7c6ft96//bJdCBvt/oKZc7zj3hJ99u8+ncwW1vb8ltHKbxr88PZxHlv6d3r7PlrYyqeUTr0b1j+F1EUEHqKlZ05256EG+o8V7v4JWyO4TgwKBiliQzJ6N63PqJAQyvdKXd80/2/XbeW8w+hvVv0Qqfeaf/PcNfQh1/fY6RFOkTZ7CDCwq0Torxl/am9uKmeHeoWy392IuJxVhsaEuhPVt2bmnUvGOTA1zTyOVdOrIng4SFwGd8zur8o+1pA3zwL9Bfut3/UaxeyzH4p3pnCoJy1a/KH4lqygi6k2Fl/ieq8j8P/X6mu/RT1Xj36x2o9adeeCifd7vvtAuBASP5Zva1LrPmfVZ1iX62u8z2dxgERwoX987Dt1O3Y/aNZLHLsDefbQ1gW9Mp21H/xr0EWgeytCbH4aroVMAWEzKMgGD3uOvIeGh/uzvg76uOtff3yWrIJ9vcB9+s/jM6VjezUNHEZ1ohYAW1H+8HnTli3MtymnxfTvPHS6y4pxFXsIvIZjVGxTxb5jpSAqmXCNo8gBxY09DfJoIlOfhV6vlBEsjH3Ix9t89c1FhK2axqLe/35fTHvpn85W99yEn6Z/46HfMnaY/+x19vUh6nEHivaIT7+z/Wj/3Rl9WzX/77//1PAAAA//9rUQ6t") + unpacked := 
packer.MustUnpack("eJzce0mTqziX9v77GbX9ursY0llFR7wLQyaTnWQZXyOhHZKcYFtgV9rYxh393zskBgMmx7r19hu9yIh7sdBwdIbnPOfwX7+sssPyNYvYr/vdkvwa7dJ/3y9fj8vX/yhS9st//oJT84B+bONZoHvTwGMkQ4zEuzUGs3vHMk94Ll8QdBUEnUkIXSkCKAnVwd8yctnG4LSNHcM5+HNn7xjuIQSjBCnBAYGRNE2DPATuHoGZRm1XRnNnb6zGsbOSTWd1ip2UrqGqM5J6DGczzbUP+uJR/uEHLvAD98WXNHt22Z6fHnTNiXfUSIM7YmkFtYINVGRGbXcXqk/3jrmfOMZ4FUL9MIXVmVbO3mDShGTBHsGne77udK6vsaqPoOofoXLeEXUmnjvGOHYsJiEg3TsW2iMQSM1z2z8+r/QdznSZ2k8T8cwYx1gZvYSKlqP0vCvlMzpidcx/PziWnJCHbTOWWKYUPWxjlJ4ZgrPr89be6mfTuV4gIB9pGrxESjB6jrfNb+Wf/orght/HOlSCC5G1hFhMjP3WPLbLSpmyHJ3aY6SYpMEBq4hB5cCWP67nqf/EvCud33dOx1vxDkrZHVQ9iaRBgn9s46UqVTJBO2z7jDBNCcFZ7pzb9hi2gjW1tGJI1tU60hLq7PoOSrAdMHLp7Osg9HTW7GVPraC4nl2/IHBmoeofSXYj95t1y/k0mdq6XJ7vKpvWXR4ci+VRGqypqW0RMDcIupfnlf7by2ynRlaQP6/0PQKjjFrx1rUP1TqeNpmP/7/zMI5DMNo4VpIQ6cCW83izVKo1bWnvGJRhy7xQi62JEiQk9bZucYpd1WXIYhe3OPE9ZJFippHymE2NcYYtLSOqnxAlziaz7T9++be+V8jp6oCX0aHvFGCwiaC/QsCUjNQ7oIdtHHaemQVqGdwUPK2mwvCuY6ZpsA+hJ0Xg6RACyg07XwJ576xO5TuLhXiHKIFE4TiPwPnQdhQoNfdEWaym445juSDoy8QYSQjIJ2yZEpqPGE7NFbaCzR+AK4PH+mtgYOZiDyDIqdEaD/2Cgps19lihWQRG2TQ9M5oG+z+Az8IsyPrzIsV7IVZQhFzxH6TNUjID/5EtZptAD0zN/iHRh+f14+nJlvprJBT4O2q5LISzvHQgLJum8hHZwhnkCIySkCvXg5yG4HxBc2cCZ7uEZP4Opeaa8jtJg4TaT8fO3WQuE5ee+S/cOWE1kIgdSFDxtiEYZfx+hROebVtOcdhgjdVTPAPnvfNoyshikljPqAzEKA2RFPoKAbrDK12NoL91DFqezfg9m8bbAYP3XqjCpMjUCgQoW9rjygBrR+SPiLUojc922fNqnMG0XH9a3L26lXMiSrBHwJOw6txzw+NnJKftxCl0HVn+xTHoDqc+Wz5sY2EIhfwbskwpDLQLtV0WAikX/4ceo7Z0WCp+Qi3zhah+gYB5mKajBIPgQixzjaCU8QDgWEESKnEcgdGJwlnO5RaB0Z98H1BhObKCu9rJUJuduKzF2u192b6KCz1Blv+CUsaweKYXWPEYUT3ukLgTOuJ0EaNUKxwrUMogKvZ3QXBWOQotR3M9xarD596E0E+a+5uPxP+5bU2Nm9/EvU0NPcHpLO7LRPgA6J1C4LHy/hqnVo2RX4jtHkXQUbTGqd4GQpYTJSioqSUo8xl5Wy4PWBkpCLpSfY9AlvL6Hoh8oPw5tX6v5O5f+O9CZ2EiEW4LpraPoLgD7hcu9Z7qgFu/V+nZjlrB4Xml1+epn0v40horzt4K4LaeUCtu69pQ8F47j+XvpR4HF8dCR7LS9Za9c7vNInAXCzswxllpv7OjW9zFPkSMZEyKeCDmesJlt9KlyDK5fC7NOraQj4TALA7hU0ythDlW5T/meh4Cmd9fHdS4nHKinBNqBW/YXxekiHuatYLdjS3G+aTYZO13xbwP29h9eJy0wIPYSwd4WKjAitS1deN6BxhoGwrOrCXT1LEe48ZeDX1HCv1CrGAdAbQTMlC0nKZBwf1SJSuhM65S243PgzF2BcCTqVvcNXvEwHxFgZaQzE26AKfRlcaeOue1fr93rOv81W8HLisMNAUFmvBp7XcqENOf7zPvNHHrq+s0tj/bfgwKLQHyhDz5PUdAy5t7MF1GYLAj6SIOeVywvCNO0Q4VuoQLfY0VmTm2vyWpJiMlLnXy0cy4/3KMJMeFvseKl2BDTyNwZqTY5F84SxlXTK3gugEVtMNWUNnpW6CyBfTWN36gAtCVzVSyoam5pyC4tGTCwaTQeTRuPxPrv0SAYxFaRNBvAW8pJiq7cN0x4u3aeTQ3yH6KqU2PjmVukKmlEQj2/Fnbl5JCAM3ar8c1borAoM9vACFUShlPU7aZGgPPMy8PoX6JLE0a/p37HG+HlbteTCt/F7Et/f3esQ9aKW/vSOz6jvSUpNrhBrCXCVMb50z43H1/gDL3yPFGFzt2fWxpZw02mlR4pQPqr3jAbe5uOu+u39h7hYka3eM6D8W7rLyvKuYZegPka39S+tkKmBtc50dpBKhMUlPYSi9m9hKB7YQqCcNrHv+CHKn+dmL4v3XPXp/zLyUGIhkgKZ/3SSQDX0oMCNvmdCgxUM5HCnqJQedZLzH4lwL5O/p5cNwD2u0zxjvhgKqMUziCZQOOpNJIVlUmme648+FOYoNg3Mp0ReC7d6zzEalcobvja/AD1RYwXG/fYR2GwZZTgXSeeWKFcmcpgCi2Tp27rEHNFbS4DCksd2y6o1Ycu4q3x2qwKRMAklf75U5cQtB9qZ0mVPn45IWkQYZgcmpl3Lfgnhtturh3DLpsy3e62mYfMQJ9eZWOUsg55QCiTF7ae3QGWYZb1qDreG4cRvxPyeRjVzkkKD0k5b+5jngc+GzdYjO5MdZlRnfbVda3VR+MNsTSdjibxQslWFPo7qi9mYSKvCnBhH/CCsupIV8Q8GSSMmnZ03t0neOArEAxMsGq7UJlce88hOrzQzwJgSccHw8SwpbUUoeehSMMcmTrRx4kjfR8RLJ2CqG/LeXOwYmrRuDu3jGc4w+LrUhqFsu5ZtbymUrX96dCBj6bcnkUWmv/0p9TPnfh8Dn3ERjJPOlzVtqR2LOjD84JUf1dWGjm9R3tQoVf0fZYIcf2OSerEX+24vpCRVKnqeSyjZ3N0z00zzOSahlJzYPzWAIQaJ6b/Yp/12uYZ8J9DbUCArmdy2cyuE7qbRHwXoX8VD/B1um+Sm5YKGsCKPUTGidtyQV6LFQDAUCcalzFyjYBz+EBPmXpcu5cn62kAw8EThMsxyui+jxYFfUzarEDAprMdeHpMp4QS7tQk+/fkzgArO74DgGPJ7N79KOXLBnusJ7V+7DKeNEEYsNt5m7vazqXmzupxl2o5TOSOa1nzmEKgxNS3QRZi95zlxFFk0nqMVK0ZPCGHLvjR/cRHK/qJDECMuP+4Hk1Vp4exhNiuwyqQR6BEdepPX7YTqZznS2tYF2C1EUNlITuP6/Gq7YekKtt1mskJKVdAGq4Mk4b/Vi1gWb/HoflM7DvD8mfK8iCao9lfY+prRKIdgLZ
B9VcPo1ejIW8bmLJLbBzbpLv4cRI7zPLB54sU7AQZ8LAPPXtqcPI266Mrc5eP0yWROxNg6KTjKxvZdW2yW4VQOolJ7eMs9gHRDuisCOOh4FkOaffi0NnhlMqRQaPQ5X8VGnnPNzFTxUJFFnmZa4EIz5HHete5jz21CSud0HALEIlHgCQOw6COf5JuB1xX4lTTXL4eqor48zfYbDIQ+iuI1uK//ghxa5iFvhHKLlFub5rHwoKRkJHpylKMGD7JazGiriYcNxRnsfwfyNZkAvfNB8dQrA7kqwaeyHZZD6+jZEvK7YcwLM+91mgTA6quCj8rcA5411physdd6pVmceoHZymKdvjT+DSGvvyZJBY54RaiyFyt1cRkxOc8uRLFglga7xEsuAW+1paxvEaKkZ7BBHDD/IGAVdGxYeVNmu+OJufIKyvcjFGVdJD8pqoIcWAHESlziyoxVKOEZ5XusB3pNDaMi4Q5HHdlaDCMUSl86a2jiwzRxxnVLb7kb96G2/fYOC/ndSczvVDCMc9v99PkG/8TFmVVMw9NjUJyyWp2fE3ZVLdIy4/rETWGJwtLY8Re8bzs1VDsBbifpv7K3W8Q0iv4Ky3x6HKKffH0Gf8Pjqkxum7+29kvUIAHUm6eJdA6JE7HTK3Oeupis22X4SC4OBn8F/Evq+kUe++vD5R/HcSyCJWhWB0EXlvl4ys99WtgHLZAfNEOhXO6jn3CyUuE/9uv8P1gaY8V64wprAP0q3u1sUH1ZOQxXJxZsvf4d46wudksyO1vBPfX9jRu/Y8wYY8bGMK/FNvLyUOt7R1pAQcU2yw4r0i6PTmEdi/QMDfEVm7YEtT+bmeV3r57HR7/qnqjQjPGS5bQcq15PAZ0lEmSvByQxj9jcTrewWZb6/fKgj9ZcK0U6DrV9+lOIT0MgUlX0AVUwqVuPsbfGphHPOeQpdNQakfkR1ccbtVzoFSTcapXyzbPkgZSSFg+VU/pLjWnev7dE0M+USBe6HgOi6yfYmYWht/1vNuqe2fosw74qtP/hOrQYFTc4+u415DgF7D635adiPFGJpSa84cQT/lenrd1+8Xbx2UxGQb27X0dzrv2hn//xJ6nXW4rV31IdiQ61w8V7i7/kZ3mOfH17lL8r3yiX+dQG/4vDd1oi44f8zZSHGoaKemCFMXiqwhQljk3xUn9xXcfcV8nySEv0zottbIa5m9zDfxH6vxyeG4xtC3IfSmCG62rn04UujzMRrH1qjmK1SPCcxskL1j0FI/CiLw3Jf4oRe2XB6Gu7/8MgeJF3X+UnG6VW5yaPKaK6/Lc/Gy08eQD1jxmcOkXodY2TnVdGB9hi+Nu506N/nmB3ngW/ziQDGnnXsdWnnbz1m/bwvv7KG2iXf9eRUTrvxjuc96L1ARRYm3uM0YFHqKrYBRY1R37OX1XNcGnSa3i2FT8G2wRqWbs5pPrf2BKHzhQfkI28CNHmR1J97ohBWO7TaioDWwVo2P8yejGXvlesU8/guygjSEwZ7aw51jt5zuzT62WPWkXtfXjZxEN9xwt1de68005fIv4wNU9SPJZh+tfSHK6aYjsPYR0/U47+tkm69oyarcb7M/vW1j7fW7PrH1N1Qw6/5JdQHy5t1y7uRI1EYvWvnO21j3zX2+g2fadkChx5occfytLsVBXPaX5piPRD5CVD0JlcW3zvVmY9L3znjNS+JvdUD2dGs8xE/VY/v+4ZqDW0GCrED4IRHbM7pF4O6+y0eV9tGLjT+5tjIQE5Nl9DrU+Ti3goRk3QJn1HnWCoSfLW5+gwz6Siv1pzoXb4I0Byf+ZaokHGRI3ydjKkAJk6ZoWQGj4YAqa6JLEKouQ0pw126PHm7vdXliuiQqd7QJExdb/J5PTgOK3AOo75HK7wPblmHWd3N1tLdty3WHVRawzxQm33LMbzvkj42v65hb4HXQ6H4OCdx0FwjZ0DIpmguwKog2apmXyCA7I/7HbYdBujy8rsiABf4AgURStq40svogoWryVyqqdvijg7ptYIct6WOKtaZxM59hqO9FafHj1oQvfthwPiKFp18kx4ICOWnIClYUkP68WShrJwTdNZ/3j7n/249FsFhs2MMnqNkDgn4RAa/qH6l7skYcnguKcehcxpseoStrkgb8JgtqakfMtDpVeQmVJMEp5dZZan7W0JlvtER8ole2uvuKpvtcO4TFPZU48z4qaeYGKoYASQ1F0LRcNB+M/Cw68R2q94Y67MGfKt0NtARb5xdqaS/YYhf6sL3piWyt2YIPN2fPsaJ1qDUEkzWCuiQgeVZBFlj260dgVvax17ZiCH06kVRbI+hduCfikaO312Fquv5wptvL/N1zXO8wDdJrGh7cUdtNuE4IOZmaoGKvZcke7FSb8066e+w9733cM1j6fCtyDZQ7RVuLeZdPCq22zYs7fr9v/X+7BNrT9xs95XLHmSf85NttPq07uO3tLbiNw1X86+LhLKL8H6u718n8VkblPKL16N4x/DYiKCF1GSvac9c9iDfUeKd38ErZHUJwYFAxC5Kao0E9bvxED4aXutLs+Yt9v633ZpOPYf1btMJn3ul+z/BTqOPvzzGQIn3iDHZwQYHWSjF+am9qJ26Kd/u61dCPnZiYD8WGphTasWXnlkbNWjbZwzW1XN6lIzsySFgIfMbnLM8/2J7Wwwf/Av2l2/2f+fK1GIJ/qnemICiW3ar8kaimjKA76lfmv1CV/zr0+0p36aeq8e73O1CrT71wXz7vdt9pFwIDRrLN5HtdZvX7rOwS/Wx3me3vMAiOFM7unYfHU7tj9o1ksc2w159t9WBb3SnbUv/avQRaC7I0JsfhquhUwBYTMoyAYPe460h5aH+7O+Dvq4419/fJasgn29x736z+0ztWP/j29LZDqNvxw5PKRHOMR80xyOX5IcwGk8ZdRDbLIdpmYZnrSAmkTtJo82BzYHXzf5M0FuTgl2nqBwkjH3Mz9t2edCGVQjbLusz7PTzdsW8mitlbH32S7pm/T938RYqk+6HYm/TIKQTeKxrgCf+P9W5/9BX25Jf//n//EwAA//82Bxyl") SupportedMap = make(map[string]Spec) for f, v := range unpacked { diff --git a/internal/spec/osquerybeat.yml b/internal/spec/osquerybeat.yml index 36e60901a34..6a7f3be562e 100644 --- a/internal/spec/osquerybeat.yml +++ b/internal/spec/osquerybeat.yml @@ -35,5 +35,6 @@ rules: - inputs - output -when: length(${inputs}) > 0 and hasKey(${output}, 'elasticsearch') +when: length(${inputs}) > 0 and hasKey(${output}, 'elasticsearch', 'redis', + 'kafka', 'logstash') constraints: ${runtime.arch} != '386' 
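A note on the osquerybeat fix above: each program spec's `when` clause is an EQL expression that the agent evaluates against the rendered policy, and the program is only started when the expression evaluates to true. Below is a minimal, illustrative sketch of that evaluation — not part of the patch series. It would have to live inside the elastic-agent module (internal/ packages are not importable from outside it), the mapStore type is a made-up stand-in for however the agent actually resolves ${inputs} and ${output}, and the Lookup(string) (interface{}, bool) shape is assumed from the test store used in internal/pkg/eql/eql_test.go.

package main

import (
	"fmt"

	"github.com/elastic/elastic-agent/internal/pkg/eql"
)

// mapStore is a hypothetical variable store backed by a plain map.
type mapStore struct{ vars map[string]interface{} }

// Lookup implements the store interface the eql package is assumed to expect.
func (s *mapStore) Lookup(name string) (interface{}, bool) {
	v, ok := s.vars[name]
	return v, ok
}

func main() {
	// A policy with one osquery input and a logstash output — the case that
	// previously failed, because the old condition only accepted an
	// elasticsearch output.
	store := &mapStore{vars: map[string]interface{}{
		"inputs": []interface{}{
			map[string]interface{}{"type": "osquery"},
		},
		"output": map[string]interface{}{
			"logstash": map[string]interface{}{"hosts": []interface{}{"localhost:5044"}},
		},
	}}

	// The condition copied from the patched osquerybeat spec file.
	ok, err := eql.Eval(
		"length(${inputs}) > 0 and hasKey(${output}, 'elasticsearch', 'redis', 'kafka', 'logstash')",
		store,
	)
	fmt.Println(ok, err) // with logstash now in the allowed set: true <nil>
}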
From a18cfadcd38fef27be0307ae5307b562ceed44fc Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Thu, 7 Jul 2022 01:41:51 -0400 Subject: [PATCH 038/180] [Automation] Update elastic stack version to 8.4.0-d0a4da44 for testing (#684) Co-authored-by: apmmachine --- testing/environments/snapshot.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/testing/environments/snapshot.yml b/testing/environments/snapshot.yml index 9c988fba8ed..f2f3b3b330f 100644 --- a/testing/environments/snapshot.yml +++ b/testing/environments/snapshot.yml @@ -3,7 +3,7 @@ version: '2.3' services: elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:8.4.0-533f1e30-SNAPSHOT + image: docker.elastic.co/elasticsearch/elasticsearch:8.4.0-d0a4da44-SNAPSHOT # When extend is used it merges healthcheck.tests, see: # https://github.com/docker/compose/issues/8962 # healthcheck: @@ -42,7 +42,7 @@ services: - ./docker/logstash/pki:/etc/pki:ro kibana: - image: docker.elastic.co/kibana/kibana:8.4.0-533f1e30-SNAPSHOT + image: docker.elastic.co/kibana/kibana:8.4.0-d0a4da44-SNAPSHOT environment: - "ELASTICSEARCH_USERNAME=kibana_system_user" - "ELASTICSEARCH_PASSWORD=testing" From fe7881952826fa19c54e6eb32a3aa4ae0925e58e Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Mon, 11 Jul 2022 01:37:29 -0400 Subject: [PATCH 039/180] [Automation] Update elastic stack version to 8.4.0-dd98ded4 for testing (#703) Co-authored-by: apmmachine --- testing/environments/snapshot.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/testing/environments/snapshot.yml b/testing/environments/snapshot.yml index f2f3b3b330f..bc05ed04fa0 100644 --- a/testing/environments/snapshot.yml +++ b/testing/environments/snapshot.yml @@ -3,7 +3,7 @@ version: '2.3' services: elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:8.4.0-d0a4da44-SNAPSHOT + image: docker.elastic.co/elasticsearch/elasticsearch:8.4.0-dd98ded4-SNAPSHOT # When extend is used it merges healthcheck.tests, see: # https://github.com/docker/compose/issues/8962 # healthcheck: @@ -42,7 +42,7 @@ services: - ./docker/logstash/pki:/etc/pki:ro kibana: - image: docker.elastic.co/kibana/kibana:8.4.0-d0a4da44-SNAPSHOT + image: docker.elastic.co/kibana/kibana:8.4.0-dd98ded4-SNAPSHOT environment: - "ELASTICSEARCH_USERNAME=kibana_system_user" - "ELASTICSEARCH_PASSWORD=testing" From d1d56a9d46751b9fcbbf2b77cf3eb0349032fc6a Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Tue, 12 Jul 2022 01:49:59 -0400 Subject: [PATCH 040/180] [Automation] Update elastic stack version to 8.4.0-164d9a10 for testing (#705) Co-authored-by: apmmachine --- testing/environments/snapshot.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/testing/environments/snapshot.yml b/testing/environments/snapshot.yml index bc05ed04fa0..218ea14d077 100644 --- a/testing/environments/snapshot.yml +++ b/testing/environments/snapshot.yml @@ -3,7 +3,7 @@ version: '2.3' services: elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:8.4.0-dd98ded4-SNAPSHOT + image: docker.elastic.co/elasticsearch/elasticsearch:8.4.0-164d9a10-SNAPSHOT # When extend is used it merges healthcheck.tests, see: # https://github.com/docker/compose/issues/8962 # healthcheck: @@ -42,7 +42,7 @@ services: - ./docker/logstash/pki:/etc/pki:ro kibana: - image: docker.elastic.co/kibana/kibana:8.4.0-dd98ded4-SNAPSHOT + 
image: docker.elastic.co/kibana/kibana:8.4.0-164d9a10-SNAPSHOT environment: - "ELASTICSEARCH_USERNAME=kibana_system_user" - "ELASTICSEARCH_PASSWORD=testing" From bda217dd48e1cc6ca1d2b338ec67dd495d7aa769 Mon Sep 17 00:00:00 2001 From: Michel Laterman <82832767+michel-laterman@users.noreply.github.com> Date: Tue, 12 Jul 2022 11:49:39 -0700 Subject: [PATCH 041/180] Add missing license headers (#711) --- internal/pkg/core/status/handler.go | 4 ++++ internal/pkg/testutils/status_reporter.go | 4 ++++ 2 files changed, 8 insertions(+) diff --git a/internal/pkg/core/status/handler.go b/internal/pkg/core/status/handler.go index 2e7476901c5..e82f73fb216 100644 --- a/internal/pkg/core/status/handler.go +++ b/internal/pkg/core/status/handler.go @@ -1,3 +1,7 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + package status import ( diff --git a/internal/pkg/testutils/status_reporter.go b/internal/pkg/testutils/status_reporter.go index a045e50304a..45448aa53b2 100644 --- a/internal/pkg/testutils/status_reporter.go +++ b/internal/pkg/testutils/status_reporter.go @@ -1,3 +1,7 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + package testutils import ( From be332390064f924c00b5668785aea81b22af6e70 Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Wed, 13 Jul 2022 01:48:41 -0400 Subject: [PATCH 042/180] [Automation] Update elastic stack version to 8.4.0-00048b66 for testing (#713) Co-authored-by: apmmachine --- testing/environments/snapshot.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/testing/environments/snapshot.yml b/testing/environments/snapshot.yml index 218ea14d077..0d7fb81c508 100644 --- a/testing/environments/snapshot.yml +++ b/testing/environments/snapshot.yml @@ -3,7 +3,7 @@ version: '2.3' services: elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:8.4.0-164d9a10-SNAPSHOT + image: docker.elastic.co/elasticsearch/elasticsearch:8.4.0-00048b66-SNAPSHOT # When extend is used it merges healthcheck.tests, see: # https://github.com/docker/compose/issues/8962 # healthcheck: @@ -42,7 +42,7 @@ services: - ./docker/logstash/pki:/etc/pki:ro kibana: - image: docker.elastic.co/kibana/kibana:8.4.0-164d9a10-SNAPSHOT + image: docker.elastic.co/kibana/kibana:8.4.0-00048b66-SNAPSHOT environment: - "ELASTICSEARCH_USERNAME=kibana_system_user" - "ELASTICSEARCH_PASSWORD=testing" From 28721ebe888b377c36b43c7dbf94978e3d16ba9c Mon Sep 17 00:00:00 2001 From: Michel Laterman <82832767+michel-laterman@users.noreply.github.com> Date: Wed, 13 Jul 2022 11:29:18 -0700 Subject: [PATCH 043/180] Allow - in eql variable names (#710) * fix to allow dashes in variable names in EQL expressions extend eql to allow the '-' char to appear in variable names, i.e., ${data.some-var} and additional test cases to eql, the transpiler, and the k8s provider to verify this works. Note that the bug was caused by the EQL limitation, the otehr test cases were added when attempting to find it. 
* Regenerate grammer with antlr 4.7.1, add CHANGELOG * Fix linter issue * Fix typo --- CHANGELOG.next.asciidoc | 1 + internal/pkg/agent/transpiler/vars_test.go | 11 +- .../providers/kubernetes/node_test.go | 14 +- .../providers/kubernetes/pod_test.go | 34 ++-- .../providers/kubernetes/service_test.go | 12 +- internal/pkg/eql/Eql.g4 | 2 +- internal/pkg/eql/eql_test.go | 21 ++- internal/pkg/eql/parser/EqlLexer.interp | 2 +- internal/pkg/eql/parser/eql_lexer.go | 155 +++++++++--------- 9 files changed, 143 insertions(+), 109 deletions(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 602b0293cc6..c9e71a035b7 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -107,6 +107,7 @@ - Collects stdout and stderr of applications run as a process and logs them. {issue}[88] - Remove VerificationMode option to empty string. Default value is `full`. {issue}[184] - diagnostics collect file mod times are set. {pull}570[570] +- Allow the - char to appear as part of variable names in eql expressions. {issue}709[709] {pull}710[710] ==== New features diff --git a/internal/pkg/agent/transpiler/vars_test.go b/internal/pkg/agent/transpiler/vars_test.go index 5dd6d41ec72..8171a4b7a9f 100644 --- a/internal/pkg/agent/transpiler/vars_test.go +++ b/internal/pkg/agent/transpiler/vars_test.go @@ -17,8 +17,9 @@ import ( func TestVars_Replace(t *testing.T) { vars := mustMakeVars(map[string]interface{}{ "un-der_score": map[string]interface{}{ - "key1": "data1", - "key2": "data2", + "key1": "data1", + "key2": "data2", + "with-dash": "dash-value", "list": []string{ "array1", "array2", @@ -44,6 +45,12 @@ func TestVars_Replace(t *testing.T) { false, false, }, + { + "${un-der_score.with-dash}", + NewStrVal("dash-value"), + false, + false, + }, { "${un-der_score.missing}", NewStrVal(""), diff --git a/internal/pkg/composable/providers/kubernetes/node_test.go b/internal/pkg/composable/providers/kubernetes/node_test.go index 7d8abfcea4e..547702573c4 100644 --- a/internal/pkg/composable/providers/kubernetes/node_test.go +++ b/internal/pkg/composable/providers/kubernetes/node_test.go @@ -26,7 +26,8 @@ func TestGenerateNodeData(t *testing.T) { Name: "testnode", UID: types.UID(uid), Labels: map[string]string{ - "foo": "bar", + "foo": "bar", + "with-dash": "dash-value", }, Annotations: map[string]string{ "baz": "ban", @@ -54,7 +55,8 @@ func TestGenerateNodeData(t *testing.T) { "baz": "ban", }, "labels": mapstr.M{ - "foo": "bar", + "foo": "bar", + "with-dash": "dash-value", }, } @@ -64,7 +66,10 @@ func TestGenerateNodeData(t *testing.T) { "name": "devcluster", "url": "8.8.8.8:9090"}, }, "kubernetes": mapstr.M{ - "labels": mapstr.M{"foo": "bar"}, + "labels": mapstr.M{ + "foo": "bar", + "with-dash": "dash-value", + }, "annotations": mapstr.M{"baz": "ban"}, "node": mapstr.M{ "ip": "node1", @@ -123,7 +128,8 @@ func (n *nodeMeta) GenerateK8s(obj kubernetes.Resource, opts ...metadata.FieldOp "ip": "node1", }, "labels": mapstr.M{ - "foo": "bar", + "foo": "bar", + "with-dash": "dash-value", }, "annotations": mapstr.M{ "baz": "ban", diff --git a/internal/pkg/composable/providers/kubernetes/pod_test.go b/internal/pkg/composable/providers/kubernetes/pod_test.go index 45fd78ac76c..feeba193472 100644 --- a/internal/pkg/composable/providers/kubernetes/pod_test.go +++ b/internal/pkg/composable/providers/kubernetes/pod_test.go @@ -27,7 +27,8 @@ func TestGeneratePodData(t *testing.T) { UID: types.UID(uid), Namespace: "testns", Labels: map[string]string{ - "foo": "bar", + "foo": "bar", + "with-dash": "dash-value", }, 
Annotations: map[string]string{ "app": "production", @@ -59,7 +60,8 @@ func TestGeneratePodData(t *testing.T) { "nsa": "nsb", }, "labels": mapstr.M{ - "foo": "bar", + "foo": "bar", + "with-dash": "dash-value", }, "annotations": mapstr.M{ "app": "production", @@ -74,7 +76,8 @@ func TestGeneratePodData(t *testing.T) { }, "kubernetes": mapstr.M{ "namespace": "testns", "labels": mapstr.M{ - "foo": "bar", + "foo": "bar", + "with-dash": "dash-value", }, "annotations": mapstr.M{"app": "production"}, "pod": mapstr.M{ @@ -119,7 +122,8 @@ func TestGenerateContainerPodData(t *testing.T) { UID: types.UID(uid), Namespace: "testns", Labels: map[string]string{ - "foo": "bar", + "foo": "bar", + "with-dash": "dash-value", }, Annotations: map[string]string{ "app": "production", @@ -175,7 +179,8 @@ func TestGenerateContainerPodData(t *testing.T) { "app": "production", }, "labels": mapstr.M{ - "foo": "bar", + "foo": "bar", + "with-dash": "dash-value", }, } @@ -191,7 +196,9 @@ func TestGenerateContainerPodData(t *testing.T) { }, "kubernetes": mapstr.M{ "namespace": "testns", "annotations": mapstr.M{"app": "production"}, - "labels": mapstr.M{"foo": "bar"}, + "labels": mapstr.M{"foo": "bar", + "with-dash": "dash-value", + }, "pod": mapstr.M{ "ip": "127.0.0.5", "name": "testpod", @@ -232,7 +239,8 @@ func TestEphemeralContainers(t *testing.T) { UID: types.UID(uid), Namespace: "testns", Labels: map[string]string{ - "foo": "bar", + "foo": "bar", + "with-dash": "dash-value", }, Annotations: map[string]string{ "app": "production", @@ -274,7 +282,8 @@ func TestEphemeralContainers(t *testing.T) { "ip": pod.Status.PodIP, }, "labels": mapstr.M{ - "foo": "bar", + "foo": "bar", + "with-dash": "dash-value", }, "container": mapstr.M{ "id": "asdfghdeadbeef", @@ -300,8 +309,10 @@ func TestEphemeralContainers(t *testing.T) { "name": "devcluster", "url": "8.8.8.8:9090"}, }, "kubernetes": mapstr.M{ - "namespace": "testns", - "labels": mapstr.M{"foo": "bar"}, + "namespace": "testns", + "labels": mapstr.M{"foo": "bar", + "with-dash": "dash-value", + }, "annotations": mapstr.M{"app": "production"}, "pod": mapstr.M{ "ip": "127.0.0.5", @@ -383,7 +394,8 @@ func (p *podMeta) GenerateK8s(obj kubernetes.Resource, opts ...metadata.FieldOpt "ip": k8sPod.Status.PodIP, }, "labels": mapstr.M{ - "foo": "bar", + "foo": "bar", + "with-dash": "dash-value", }, "annotations": mapstr.M{ "app": "production", diff --git a/internal/pkg/composable/providers/kubernetes/service_test.go b/internal/pkg/composable/providers/kubernetes/service_test.go index 47d420fb233..0fbed196908 100644 --- a/internal/pkg/composable/providers/kubernetes/service_test.go +++ b/internal/pkg/composable/providers/kubernetes/service_test.go @@ -25,7 +25,8 @@ func TestGenerateServiceData(t *testing.T) { UID: types.UID(uid), Namespace: "testns", Labels: map[string]string{ - "foo": "bar", + "foo": "bar", + "with-dash": "dash-value", }, Annotations: map[string]string{ "baz": "ban", @@ -64,7 +65,8 @@ func TestGenerateServiceData(t *testing.T) { "baz": "ban", }, "labels": mapstr.M{ - "foo": "bar", + "foo": "bar", + "with-dash": "dash-value", }, } @@ -80,7 +82,8 @@ func TestGenerateServiceData(t *testing.T) { "ip": "1.2.3.4", }, "labels": mapstr.M{ - "foo": "bar", + "foo": "bar", + "with-dash": "dash-value", }, "annotations": mapstr.M{ "baz": "ban", @@ -139,7 +142,8 @@ func (s *svcMeta) GenerateK8s(obj kubernetes.Resource, opts ...metadata.FieldOpt "ip": "1.2.3.4", }, "labels": mapstr.M{ - "foo": "bar", + "foo": "bar", + "with-dash": "dash-value", }, "annotations": mapstr.M{ "baz": "ban", 
diff --git a/internal/pkg/eql/Eql.g4 b/internal/pkg/eql/Eql.g4 index d46e2571812..b6731d41cef 100644 --- a/internal/pkg/eql/Eql.g4 +++ b/internal/pkg/eql/Eql.g4 @@ -22,7 +22,7 @@ NUMBER: [\-]? [0-9]+; WHITESPACE: [ \r\n\t]+ -> skip; NOT: 'NOT' | 'not'; NAME: [a-zA-Z_] [a-zA-Z0-9_]*; -VNAME: [a-zA-Z0-9_.]+('.'[a-zA-Z0-9_]+)*; +VNAME: [a-zA-Z0-9_.-]+('.'[a-zA-Z0-9_-]+)*; STEXT: '\'' ~[\r\n']* '\''; DTEXT: '"' ~[\r\n"]* '"'; LPAR: '('; diff --git a/internal/pkg/eql/eql_test.go b/internal/pkg/eql/eql_test.go index eab34f69026..77bf5cb37f0 100644 --- a/internal/pkg/eql/eql_test.go +++ b/internal/pkg/eql/eql_test.go @@ -42,6 +42,8 @@ func TestEql(t *testing.T) { {expression: "${env.MISSING|host.MISSING|true} == true", result: true}, {expression: "${env.MISSING|host.MISSING|false} == false", result: true}, {expression: "${'constant'} == 'constant'", result: true}, + {expression: "${data.with-dash} == 'dash-value'", result: true}, + {expression: "${'dash-value'} == 'dash-value'", result: true}, // boolean {expression: "true", result: true}, @@ -306,9 +308,10 @@ func TestEql(t *testing.T) { store := &testVarStore{ vars: map[string]interface{}{ - "env.HOSTNAME": "my-hostname", - "host.name": "host-name", - "data.array": []interface{}{"array1", "array2", "array3"}, + "env.HOSTNAME": "my-hostname", + "host.name": "host-name", + "data.array": []interface{}{"array1", "array2", "array3"}, + "data.with-dash": "dash-value", "data.dict": map[string]interface{}{ "key1": "dict1", "key2": "dict2", @@ -327,7 +330,7 @@ func TestEql(t *testing.T) { } t.Run(title, func(t *testing.T) { if showDebug == "1" { - debug(test.expression) + debug(t, test.expression) } r, err := Eval(test.expression, store) @@ -343,17 +346,17 @@ func TestEql(t *testing.T) { } } -func debug(expression string) { +func debug(t *testing.T, expression string) { raw := antlr.NewInputStream(expression) lexer := parser.NewEqlLexer(raw) for { - t := lexer.NextToken() - if t.GetTokenType() == antlr.TokenEOF { + token := lexer.NextToken() + if token.GetTokenType() == antlr.TokenEOF { break } - fmt.Printf("%s (%q)\n", - lexer.SymbolicNames[t.GetTokenType()], t.GetText()) + t.Logf("%s (%q)\n", + lexer.SymbolicNames[token.GetTokenType()], token.GetText()) } } diff --git a/internal/pkg/eql/parser/EqlLexer.interp b/internal/pkg/eql/parser/EqlLexer.interp index 2131aba8177..3432105b62f 100644 --- a/internal/pkg/eql/parser/EqlLexer.interp +++ b/internal/pkg/eql/parser/EqlLexer.interp @@ -113,4 +113,4 @@ mode names: DEFAULT_MODE atn: -[3, 24715, 42794, 33075, 47597, 16764, 15335, 30598, 22884, 2, 35, 230, 8, 1, 4, 2, 9, 2, 4, 3, 9, 3, 4, 4, 9, 4, 4, 5, 9, 5, 4, 6, 9, 6, 4, 7, 9, 7, 4, 8, 9, 8, 4, 9, 9, 9, 4, 10, 9, 10, 4, 11, 9, 11, 4, 12, 9, 12, 4, 13, 9, 13, 4, 14, 9, 14, 4, 15, 9, 15, 4, 16, 9, 16, 4, 17, 9, 17, 4, 18, 9, 18, 4, 19, 9, 19, 4, 20, 9, 20, 4, 21, 9, 21, 4, 22, 9, 22, 4, 23, 9, 23, 4, 24, 9, 24, 4, 25, 9, 25, 4, 26, 9, 26, 4, 27, 9, 27, 4, 28, 9, 28, 4, 29, 9, 29, 4, 30, 9, 30, 4, 31, 9, 31, 4, 32, 9, 32, 4, 33, 9, 33, 4, 34, 9, 34, 3, 2, 3, 2, 3, 3, 3, 3, 3, 4, 3, 4, 3, 5, 3, 5, 3, 5, 3, 6, 3, 6, 3, 6, 3, 7, 3, 7, 3, 8, 3, 8, 3, 9, 3, 9, 3, 9, 3, 10, 3, 10, 3, 10, 3, 11, 3, 11, 3, 12, 3, 12, 3, 13, 3, 13, 3, 14, 3, 14, 3, 15, 3, 15, 3, 16, 3, 16, 3, 16, 3, 16, 3, 16, 3, 16, 5, 16, 108, 10, 16, 3, 17, 3, 17, 3, 17, 3, 17, 5, 17, 114, 10, 17, 3, 18, 3, 18, 3, 18, 3, 18, 3, 18, 3, 18, 3, 18, 3, 18, 5, 18, 124, 10, 18, 3, 19, 3, 19, 3, 19, 3, 19, 3, 19, 3, 19, 3, 19, 3, 19, 3, 19, 3, 19, 5, 19, 136, 10, 19, 3, 20, 5, 20, 139, 10, 20, 3, 20, 6, 
20, 142, 10, 20, 13, 20, 14, 20, 143, 3, 20, 3, 20, 6, 20, 148, 10, 20, 13, 20, 14, 20, 149, 3, 21, 5, 21, 153, 10, 21, 3, 21, 6, 21, 156, 10, 21, 13, 21, 14, 21, 157, 3, 22, 6, 22, 161, 10, 22, 13, 22, 14, 22, 162, 3, 22, 3, 22, 3, 23, 3, 23, 3, 23, 3, 23, 3, 23, 3, 23, 5, 23, 173, 10, 23, 3, 24, 3, 24, 7, 24, 177, 10, 24, 12, 24, 14, 24, 180, 11, 24, 3, 25, 6, 25, 183, 10, 25, 13, 25, 14, 25, 184, 3, 25, 3, 25, 6, 25, 189, 10, 25, 13, 25, 14, 25, 190, 7, 25, 193, 10, 25, 12, 25, 14, 25, 196, 11, 25, 3, 26, 3, 26, 7, 26, 200, 10, 26, 12, 26, 14, 26, 203, 11, 26, 3, 26, 3, 26, 3, 27, 3, 27, 7, 27, 209, 10, 27, 12, 27, 14, 27, 212, 11, 27, 3, 27, 3, 27, 3, 28, 3, 28, 3, 29, 3, 29, 3, 30, 3, 30, 3, 31, 3, 31, 3, 32, 3, 32, 3, 33, 3, 33, 3, 34, 3, 34, 3, 34, 2, 2, 35, 3, 3, 5, 4, 7, 5, 9, 6, 11, 7, 13, 8, 15, 9, 17, 10, 19, 11, 21, 12, 23, 13, 25, 14, 27, 15, 29, 16, 31, 17, 33, 18, 35, 19, 37, 20, 39, 21, 41, 22, 43, 23, 45, 24, 47, 25, 49, 26, 51, 27, 53, 28, 55, 29, 57, 30, 59, 31, 61, 32, 63, 33, 65, 34, 67, 35, 3, 2, 10, 3, 2, 47, 47, 3, 2, 50, 59, 5, 2, 11, 12, 15, 15, 34, 34, 5, 2, 67, 92, 97, 97, 99, 124, 6, 2, 50, 59, 67, 92, 97, 97, 99, 124, 7, 2, 48, 48, 50, 59, 67, 92, 97, 97, 99, 124, 5, 2, 12, 12, 15, 15, 41, 41, 5, 2, 12, 12, 15, 15, 36, 36, 2, 246, 2, 3, 3, 2, 2, 2, 2, 5, 3, 2, 2, 2, 2, 7, 3, 2, 2, 2, 2, 9, 3, 2, 2, 2, 2, 11, 3, 2, 2, 2, 2, 13, 3, 2, 2, 2, 2, 15, 3, 2, 2, 2, 2, 17, 3, 2, 2, 2, 2, 19, 3, 2, 2, 2, 2, 21, 3, 2, 2, 2, 2, 23, 3, 2, 2, 2, 2, 25, 3, 2, 2, 2, 2, 27, 3, 2, 2, 2, 2, 29, 3, 2, 2, 2, 2, 31, 3, 2, 2, 2, 2, 33, 3, 2, 2, 2, 2, 35, 3, 2, 2, 2, 2, 37, 3, 2, 2, 2, 2, 39, 3, 2, 2, 2, 2, 41, 3, 2, 2, 2, 2, 43, 3, 2, 2, 2, 2, 45, 3, 2, 2, 2, 2, 47, 3, 2, 2, 2, 2, 49, 3, 2, 2, 2, 2, 51, 3, 2, 2, 2, 2, 53, 3, 2, 2, 2, 2, 55, 3, 2, 2, 2, 2, 57, 3, 2, 2, 2, 2, 59, 3, 2, 2, 2, 2, 61, 3, 2, 2, 2, 2, 63, 3, 2, 2, 2, 2, 65, 3, 2, 2, 2, 2, 67, 3, 2, 2, 2, 3, 69, 3, 2, 2, 2, 5, 71, 3, 2, 2, 2, 7, 73, 3, 2, 2, 2, 9, 75, 3, 2, 2, 2, 11, 78, 3, 2, 2, 2, 13, 81, 3, 2, 2, 2, 15, 83, 3, 2, 2, 2, 17, 85, 3, 2, 2, 2, 19, 88, 3, 2, 2, 2, 21, 91, 3, 2, 2, 2, 23, 93, 3, 2, 2, 2, 25, 95, 3, 2, 2, 2, 27, 97, 3, 2, 2, 2, 29, 99, 3, 2, 2, 2, 31, 107, 3, 2, 2, 2, 33, 113, 3, 2, 2, 2, 35, 123, 3, 2, 2, 2, 37, 135, 3, 2, 2, 2, 39, 138, 3, 2, 2, 2, 41, 152, 3, 2, 2, 2, 43, 160, 3, 2, 2, 2, 45, 172, 3, 2, 2, 2, 47, 174, 3, 2, 2, 2, 49, 182, 3, 2, 2, 2, 51, 197, 3, 2, 2, 2, 53, 206, 3, 2, 2, 2, 55, 215, 3, 2, 2, 2, 57, 217, 3, 2, 2, 2, 59, 219, 3, 2, 2, 2, 61, 221, 3, 2, 2, 2, 63, 223, 3, 2, 2, 2, 65, 225, 3, 2, 2, 2, 67, 227, 3, 2, 2, 2, 69, 70, 7, 126, 2, 2, 70, 4, 3, 2, 2, 2, 71, 72, 7, 46, 2, 2, 72, 6, 3, 2, 2, 2, 73, 74, 7, 60, 2, 2, 74, 8, 3, 2, 2, 2, 75, 76, 7, 63, 2, 2, 76, 77, 7, 63, 2, 2, 77, 10, 3, 2, 2, 2, 78, 79, 7, 35, 2, 2, 79, 80, 7, 63, 2, 2, 80, 12, 3, 2, 2, 2, 81, 82, 7, 64, 2, 2, 82, 14, 3, 2, 2, 2, 83, 84, 7, 62, 2, 2, 84, 16, 3, 2, 2, 2, 85, 86, 7, 64, 2, 2, 86, 87, 7, 63, 2, 2, 87, 18, 3, 2, 2, 2, 88, 89, 7, 62, 2, 2, 89, 90, 7, 63, 2, 2, 90, 20, 3, 2, 2, 2, 91, 92, 7, 45, 2, 2, 92, 22, 3, 2, 2, 2, 93, 94, 7, 47, 2, 2, 94, 24, 3, 2, 2, 2, 95, 96, 7, 44, 2, 2, 96, 26, 3, 2, 2, 2, 97, 98, 7, 49, 2, 2, 98, 28, 3, 2, 2, 2, 99, 100, 7, 39, 2, 2, 100, 30, 3, 2, 2, 2, 101, 102, 7, 99, 2, 2, 102, 103, 7, 112, 2, 2, 103, 108, 7, 102, 2, 2, 104, 105, 7, 67, 2, 2, 105, 106, 7, 80, 2, 2, 106, 108, 7, 70, 2, 2, 107, 101, 3, 2, 2, 2, 107, 104, 3, 2, 2, 2, 108, 32, 3, 2, 2, 2, 109, 110, 7, 113, 2, 2, 110, 114, 7, 116, 2, 2, 111, 112, 7, 81, 2, 2, 112, 114, 7, 84, 2, 2, 113, 109, 3, 2, 
2, 2, 113, 111, 3, 2, 2, 2, 114, 34, 3, 2, 2, 2, 115, 116, 7, 118, 2, 2, 116, 117, 7, 116, 2, 2, 117, 118, 7, 119, 2, 2, 118, 124, 7, 103, 2, 2, 119, 120, 7, 86, 2, 2, 120, 121, 7, 84, 2, 2, 121, 122, 7, 87, 2, 2, 122, 124, 7, 71, 2, 2, 123, 115, 3, 2, 2, 2, 123, 119, 3, 2, 2, 2, 124, 36, 3, 2, 2, 2, 125, 126, 7, 104, 2, 2, 126, 127, 7, 99, 2, 2, 127, 128, 7, 110, 2, 2, 128, 129, 7, 117, 2, 2, 129, 136, 7, 103, 2, 2, 130, 131, 7, 72, 2, 2, 131, 132, 7, 67, 2, 2, 132, 133, 7, 78, 2, 2, 133, 134, 7, 85, 2, 2, 134, 136, 7, 71, 2, 2, 135, 125, 3, 2, 2, 2, 135, 130, 3, 2, 2, 2, 136, 38, 3, 2, 2, 2, 137, 139, 9, 2, 2, 2, 138, 137, 3, 2, 2, 2, 138, 139, 3, 2, 2, 2, 139, 141, 3, 2, 2, 2, 140, 142, 9, 3, 2, 2, 141, 140, 3, 2, 2, 2, 142, 143, 3, 2, 2, 2, 143, 141, 3, 2, 2, 2, 143, 144, 3, 2, 2, 2, 144, 145, 3, 2, 2, 2, 145, 147, 7, 48, 2, 2, 146, 148, 9, 3, 2, 2, 147, 146, 3, 2, 2, 2, 148, 149, 3, 2, 2, 2, 149, 147, 3, 2, 2, 2, 149, 150, 3, 2, 2, 2, 150, 40, 3, 2, 2, 2, 151, 153, 9, 2, 2, 2, 152, 151, 3, 2, 2, 2, 152, 153, 3, 2, 2, 2, 153, 155, 3, 2, 2, 2, 154, 156, 9, 3, 2, 2, 155, 154, 3, 2, 2, 2, 156, 157, 3, 2, 2, 2, 157, 155, 3, 2, 2, 2, 157, 158, 3, 2, 2, 2, 158, 42, 3, 2, 2, 2, 159, 161, 9, 4, 2, 2, 160, 159, 3, 2, 2, 2, 161, 162, 3, 2, 2, 2, 162, 160, 3, 2, 2, 2, 162, 163, 3, 2, 2, 2, 163, 164, 3, 2, 2, 2, 164, 165, 8, 22, 2, 2, 165, 44, 3, 2, 2, 2, 166, 167, 7, 80, 2, 2, 167, 168, 7, 81, 2, 2, 168, 173, 7, 86, 2, 2, 169, 170, 7, 112, 2, 2, 170, 171, 7, 113, 2, 2, 171, 173, 7, 118, 2, 2, 172, 166, 3, 2, 2, 2, 172, 169, 3, 2, 2, 2, 173, 46, 3, 2, 2, 2, 174, 178, 9, 5, 2, 2, 175, 177, 9, 6, 2, 2, 176, 175, 3, 2, 2, 2, 177, 180, 3, 2, 2, 2, 178, 176, 3, 2, 2, 2, 178, 179, 3, 2, 2, 2, 179, 48, 3, 2, 2, 2, 180, 178, 3, 2, 2, 2, 181, 183, 9, 7, 2, 2, 182, 181, 3, 2, 2, 2, 183, 184, 3, 2, 2, 2, 184, 182, 3, 2, 2, 2, 184, 185, 3, 2, 2, 2, 185, 194, 3, 2, 2, 2, 186, 188, 7, 48, 2, 2, 187, 189, 9, 6, 2, 2, 188, 187, 3, 2, 2, 2, 189, 190, 3, 2, 2, 2, 190, 188, 3, 2, 2, 2, 190, 191, 3, 2, 2, 2, 191, 193, 3, 2, 2, 2, 192, 186, 3, 2, 2, 2, 193, 196, 3, 2, 2, 2, 194, 192, 3, 2, 2, 2, 194, 195, 3, 2, 2, 2, 195, 50, 3, 2, 2, 2, 196, 194, 3, 2, 2, 2, 197, 201, 7, 41, 2, 2, 198, 200, 10, 8, 2, 2, 199, 198, 3, 2, 2, 2, 200, 203, 3, 2, 2, 2, 201, 199, 3, 2, 2, 2, 201, 202, 3, 2, 2, 2, 202, 204, 3, 2, 2, 2, 203, 201, 3, 2, 2, 2, 204, 205, 7, 41, 2, 2, 205, 52, 3, 2, 2, 2, 206, 210, 7, 36, 2, 2, 207, 209, 10, 9, 2, 2, 208, 207, 3, 2, 2, 2, 209, 212, 3, 2, 2, 2, 210, 208, 3, 2, 2, 2, 210, 211, 3, 2, 2, 2, 211, 213, 3, 2, 2, 2, 212, 210, 3, 2, 2, 2, 213, 214, 7, 36, 2, 2, 214, 54, 3, 2, 2, 2, 215, 216, 7, 42, 2, 2, 216, 56, 3, 2, 2, 2, 217, 218, 7, 43, 2, 2, 218, 58, 3, 2, 2, 2, 219, 220, 7, 93, 2, 2, 220, 60, 3, 2, 2, 2, 221, 222, 7, 95, 2, 2, 222, 62, 3, 2, 2, 2, 223, 224, 7, 125, 2, 2, 224, 64, 3, 2, 2, 2, 225, 226, 7, 127, 2, 2, 226, 66, 3, 2, 2, 2, 227, 228, 7, 38, 2, 2, 228, 229, 7, 125, 2, 2, 229, 68, 3, 2, 2, 2, 20, 2, 107, 113, 123, 135, 138, 143, 149, 152, 157, 162, 172, 178, 184, 190, 194, 201, 210, 3, 8, 2, 2] \ No newline at end of file +[3, 24715, 42794, 33075, 47597, 16764, 15335, 30598, 22884, 2, 35, 230, 8, 1, 4, 2, 9, 2, 4, 3, 9, 3, 4, 4, 9, 4, 4, 5, 9, 5, 4, 6, 9, 6, 4, 7, 9, 7, 4, 8, 9, 8, 4, 9, 9, 9, 4, 10, 9, 10, 4, 11, 9, 11, 4, 12, 9, 12, 4, 13, 9, 13, 4, 14, 9, 14, 4, 15, 9, 15, 4, 16, 9, 16, 4, 17, 9, 17, 4, 18, 9, 18, 4, 19, 9, 19, 4, 20, 9, 20, 4, 21, 9, 21, 4, 22, 9, 22, 4, 23, 9, 23, 4, 24, 9, 24, 4, 25, 9, 25, 4, 26, 9, 26, 4, 27, 9, 27, 4, 28, 9, 28, 4, 29, 9, 29, 4, 30, 9, 30, 4, 31, 
9, 31, 4, 32, 9, 32, 4, 33, 9, 33, 4, 34, 9, 34, 3, 2, 3, 2, 3, 3, 3, 3, 3, 4, 3, 4, 3, 5, 3, 5, 3, 5, 3, 6, 3, 6, 3, 6, 3, 7, 3, 7, 3, 8, 3, 8, 3, 9, 3, 9, 3, 9, 3, 10, 3, 10, 3, 10, 3, 11, 3, 11, 3, 12, 3, 12, 3, 13, 3, 13, 3, 14, 3, 14, 3, 15, 3, 15, 3, 16, 3, 16, 3, 16, 3, 16, 3, 16, 3, 16, 5, 16, 108, 10, 16, 3, 17, 3, 17, 3, 17, 3, 17, 5, 17, 114, 10, 17, 3, 18, 3, 18, 3, 18, 3, 18, 3, 18, 3, 18, 3, 18, 3, 18, 5, 18, 124, 10, 18, 3, 19, 3, 19, 3, 19, 3, 19, 3, 19, 3, 19, 3, 19, 3, 19, 3, 19, 3, 19, 5, 19, 136, 10, 19, 3, 20, 5, 20, 139, 10, 20, 3, 20, 6, 20, 142, 10, 20, 13, 20, 14, 20, 143, 3, 20, 3, 20, 6, 20, 148, 10, 20, 13, 20, 14, 20, 149, 3, 21, 5, 21, 153, 10, 21, 3, 21, 6, 21, 156, 10, 21, 13, 21, 14, 21, 157, 3, 22, 6, 22, 161, 10, 22, 13, 22, 14, 22, 162, 3, 22, 3, 22, 3, 23, 3, 23, 3, 23, 3, 23, 3, 23, 3, 23, 5, 23, 173, 10, 23, 3, 24, 3, 24, 7, 24, 177, 10, 24, 12, 24, 14, 24, 180, 11, 24, 3, 25, 6, 25, 183, 10, 25, 13, 25, 14, 25, 184, 3, 25, 3, 25, 6, 25, 189, 10, 25, 13, 25, 14, 25, 190, 7, 25, 193, 10, 25, 12, 25, 14, 25, 196, 11, 25, 3, 26, 3, 26, 7, 26, 200, 10, 26, 12, 26, 14, 26, 203, 11, 26, 3, 26, 3, 26, 3, 27, 3, 27, 7, 27, 209, 10, 27, 12, 27, 14, 27, 212, 11, 27, 3, 27, 3, 27, 3, 28, 3, 28, 3, 29, 3, 29, 3, 30, 3, 30, 3, 31, 3, 31, 3, 32, 3, 32, 3, 33, 3, 33, 3, 34, 3, 34, 3, 34, 2, 2, 35, 3, 3, 5, 4, 7, 5, 9, 6, 11, 7, 13, 8, 15, 9, 17, 10, 19, 11, 21, 12, 23, 13, 25, 14, 27, 15, 29, 16, 31, 17, 33, 18, 35, 19, 37, 20, 39, 21, 41, 22, 43, 23, 45, 24, 47, 25, 49, 26, 51, 27, 53, 28, 55, 29, 57, 30, 59, 31, 61, 32, 63, 33, 65, 34, 67, 35, 3, 2, 11, 3, 2, 47, 47, 3, 2, 50, 59, 5, 2, 11, 12, 15, 15, 34, 34, 5, 2, 67, 92, 97, 97, 99, 124, 6, 2, 50, 59, 67, 92, 97, 97, 99, 124, 7, 2, 47, 48, 50, 59, 67, 92, 97, 97, 99, 124, 7, 2, 47, 47, 50, 59, 67, 92, 97, 97, 99, 124, 5, 2, 12, 12, 15, 15, 41, 41, 5, 2, 12, 12, 15, 15, 36, 36, 2, 246, 2, 3, 3, 2, 2, 2, 2, 5, 3, 2, 2, 2, 2, 7, 3, 2, 2, 2, 2, 9, 3, 2, 2, 2, 2, 11, 3, 2, 2, 2, 2, 13, 3, 2, 2, 2, 2, 15, 3, 2, 2, 2, 2, 17, 3, 2, 2, 2, 2, 19, 3, 2, 2, 2, 2, 21, 3, 2, 2, 2, 2, 23, 3, 2, 2, 2, 2, 25, 3, 2, 2, 2, 2, 27, 3, 2, 2, 2, 2, 29, 3, 2, 2, 2, 2, 31, 3, 2, 2, 2, 2, 33, 3, 2, 2, 2, 2, 35, 3, 2, 2, 2, 2, 37, 3, 2, 2, 2, 2, 39, 3, 2, 2, 2, 2, 41, 3, 2, 2, 2, 2, 43, 3, 2, 2, 2, 2, 45, 3, 2, 2, 2, 2, 47, 3, 2, 2, 2, 2, 49, 3, 2, 2, 2, 2, 51, 3, 2, 2, 2, 2, 53, 3, 2, 2, 2, 2, 55, 3, 2, 2, 2, 2, 57, 3, 2, 2, 2, 2, 59, 3, 2, 2, 2, 2, 61, 3, 2, 2, 2, 2, 63, 3, 2, 2, 2, 2, 65, 3, 2, 2, 2, 2, 67, 3, 2, 2, 2, 3, 69, 3, 2, 2, 2, 5, 71, 3, 2, 2, 2, 7, 73, 3, 2, 2, 2, 9, 75, 3, 2, 2, 2, 11, 78, 3, 2, 2, 2, 13, 81, 3, 2, 2, 2, 15, 83, 3, 2, 2, 2, 17, 85, 3, 2, 2, 2, 19, 88, 3, 2, 2, 2, 21, 91, 3, 2, 2, 2, 23, 93, 3, 2, 2, 2, 25, 95, 3, 2, 2, 2, 27, 97, 3, 2, 2, 2, 29, 99, 3, 2, 2, 2, 31, 107, 3, 2, 2, 2, 33, 113, 3, 2, 2, 2, 35, 123, 3, 2, 2, 2, 37, 135, 3, 2, 2, 2, 39, 138, 3, 2, 2, 2, 41, 152, 3, 2, 2, 2, 43, 160, 3, 2, 2, 2, 45, 172, 3, 2, 2, 2, 47, 174, 3, 2, 2, 2, 49, 182, 3, 2, 2, 2, 51, 197, 3, 2, 2, 2, 53, 206, 3, 2, 2, 2, 55, 215, 3, 2, 2, 2, 57, 217, 3, 2, 2, 2, 59, 219, 3, 2, 2, 2, 61, 221, 3, 2, 2, 2, 63, 223, 3, 2, 2, 2, 65, 225, 3, 2, 2, 2, 67, 227, 3, 2, 2, 2, 69, 70, 7, 126, 2, 2, 70, 4, 3, 2, 2, 2, 71, 72, 7, 46, 2, 2, 72, 6, 3, 2, 2, 2, 73, 74, 7, 60, 2, 2, 74, 8, 3, 2, 2, 2, 75, 76, 7, 63, 2, 2, 76, 77, 7, 63, 2, 2, 77, 10, 3, 2, 2, 2, 78, 79, 7, 35, 2, 2, 79, 80, 7, 63, 2, 2, 80, 12, 3, 2, 2, 2, 81, 82, 7, 64, 2, 2, 82, 14, 3, 2, 2, 2, 83, 84, 7, 62, 2, 2, 84, 16, 3, 2, 2, 2, 85, 86, 7, 64, 2, 2, 86, 87, 7, 
63, 2, 2, 87, 18, 3, 2, 2, 2, 88, 89, 7, 62, 2, 2, 89, 90, 7, 63, 2, 2, 90, 20, 3, 2, 2, 2, 91, 92, 7, 45, 2, 2, 92, 22, 3, 2, 2, 2, 93, 94, 7, 47, 2, 2, 94, 24, 3, 2, 2, 2, 95, 96, 7, 44, 2, 2, 96, 26, 3, 2, 2, 2, 97, 98, 7, 49, 2, 2, 98, 28, 3, 2, 2, 2, 99, 100, 7, 39, 2, 2, 100, 30, 3, 2, 2, 2, 101, 102, 7, 99, 2, 2, 102, 103, 7, 112, 2, 2, 103, 108, 7, 102, 2, 2, 104, 105, 7, 67, 2, 2, 105, 106, 7, 80, 2, 2, 106, 108, 7, 70, 2, 2, 107, 101, 3, 2, 2, 2, 107, 104, 3, 2, 2, 2, 108, 32, 3, 2, 2, 2, 109, 110, 7, 113, 2, 2, 110, 114, 7, 116, 2, 2, 111, 112, 7, 81, 2, 2, 112, 114, 7, 84, 2, 2, 113, 109, 3, 2, 2, 2, 113, 111, 3, 2, 2, 2, 114, 34, 3, 2, 2, 2, 115, 116, 7, 118, 2, 2, 116, 117, 7, 116, 2, 2, 117, 118, 7, 119, 2, 2, 118, 124, 7, 103, 2, 2, 119, 120, 7, 86, 2, 2, 120, 121, 7, 84, 2, 2, 121, 122, 7, 87, 2, 2, 122, 124, 7, 71, 2, 2, 123, 115, 3, 2, 2, 2, 123, 119, 3, 2, 2, 2, 124, 36, 3, 2, 2, 2, 125, 126, 7, 104, 2, 2, 126, 127, 7, 99, 2, 2, 127, 128, 7, 110, 2, 2, 128, 129, 7, 117, 2, 2, 129, 136, 7, 103, 2, 2, 130, 131, 7, 72, 2, 2, 131, 132, 7, 67, 2, 2, 132, 133, 7, 78, 2, 2, 133, 134, 7, 85, 2, 2, 134, 136, 7, 71, 2, 2, 135, 125, 3, 2, 2, 2, 135, 130, 3, 2, 2, 2, 136, 38, 3, 2, 2, 2, 137, 139, 9, 2, 2, 2, 138, 137, 3, 2, 2, 2, 138, 139, 3, 2, 2, 2, 139, 141, 3, 2, 2, 2, 140, 142, 9, 3, 2, 2, 141, 140, 3, 2, 2, 2, 142, 143, 3, 2, 2, 2, 143, 141, 3, 2, 2, 2, 143, 144, 3, 2, 2, 2, 144, 145, 3, 2, 2, 2, 145, 147, 7, 48, 2, 2, 146, 148, 9, 3, 2, 2, 147, 146, 3, 2, 2, 2, 148, 149, 3, 2, 2, 2, 149, 147, 3, 2, 2, 2, 149, 150, 3, 2, 2, 2, 150, 40, 3, 2, 2, 2, 151, 153, 9, 2, 2, 2, 152, 151, 3, 2, 2, 2, 152, 153, 3, 2, 2, 2, 153, 155, 3, 2, 2, 2, 154, 156, 9, 3, 2, 2, 155, 154, 3, 2, 2, 2, 156, 157, 3, 2, 2, 2, 157, 155, 3, 2, 2, 2, 157, 158, 3, 2, 2, 2, 158, 42, 3, 2, 2, 2, 159, 161, 9, 4, 2, 2, 160, 159, 3, 2, 2, 2, 161, 162, 3, 2, 2, 2, 162, 160, 3, 2, 2, 2, 162, 163, 3, 2, 2, 2, 163, 164, 3, 2, 2, 2, 164, 165, 8, 22, 2, 2, 165, 44, 3, 2, 2, 2, 166, 167, 7, 80, 2, 2, 167, 168, 7, 81, 2, 2, 168, 173, 7, 86, 2, 2, 169, 170, 7, 112, 2, 2, 170, 171, 7, 113, 2, 2, 171, 173, 7, 118, 2, 2, 172, 166, 3, 2, 2, 2, 172, 169, 3, 2, 2, 2, 173, 46, 3, 2, 2, 2, 174, 178, 9, 5, 2, 2, 175, 177, 9, 6, 2, 2, 176, 175, 3, 2, 2, 2, 177, 180, 3, 2, 2, 2, 178, 176, 3, 2, 2, 2, 178, 179, 3, 2, 2, 2, 179, 48, 3, 2, 2, 2, 180, 178, 3, 2, 2, 2, 181, 183, 9, 7, 2, 2, 182, 181, 3, 2, 2, 2, 183, 184, 3, 2, 2, 2, 184, 182, 3, 2, 2, 2, 184, 185, 3, 2, 2, 2, 185, 194, 3, 2, 2, 2, 186, 188, 7, 48, 2, 2, 187, 189, 9, 8, 2, 2, 188, 187, 3, 2, 2, 2, 189, 190, 3, 2, 2, 2, 190, 188, 3, 2, 2, 2, 190, 191, 3, 2, 2, 2, 191, 193, 3, 2, 2, 2, 192, 186, 3, 2, 2, 2, 193, 196, 3, 2, 2, 2, 194, 192, 3, 2, 2, 2, 194, 195, 3, 2, 2, 2, 195, 50, 3, 2, 2, 2, 196, 194, 3, 2, 2, 2, 197, 201, 7, 41, 2, 2, 198, 200, 10, 9, 2, 2, 199, 198, 3, 2, 2, 2, 200, 203, 3, 2, 2, 2, 201, 199, 3, 2, 2, 2, 201, 202, 3, 2, 2, 2, 202, 204, 3, 2, 2, 2, 203, 201, 3, 2, 2, 2, 204, 205, 7, 41, 2, 2, 205, 52, 3, 2, 2, 2, 206, 210, 7, 36, 2, 2, 207, 209, 10, 10, 2, 2, 208, 207, 3, 2, 2, 2, 209, 212, 3, 2, 2, 2, 210, 208, 3, 2, 2, 2, 210, 211, 3, 2, 2, 2, 211, 213, 3, 2, 2, 2, 212, 210, 3, 2, 2, 2, 213, 214, 7, 36, 2, 2, 214, 54, 3, 2, 2, 2, 215, 216, 7, 42, 2, 2, 216, 56, 3, 2, 2, 2, 217, 218, 7, 43, 2, 2, 218, 58, 3, 2, 2, 2, 219, 220, 7, 93, 2, 2, 220, 60, 3, 2, 2, 2, 221, 222, 7, 95, 2, 2, 222, 62, 3, 2, 2, 2, 223, 224, 7, 125, 2, 2, 224, 64, 3, 2, 2, 2, 225, 226, 7, 127, 2, 2, 226, 66, 3, 2, 2, 2, 227, 228, 7, 38, 2, 2, 228, 229, 7, 125, 2, 2, 229, 68, 3, 
2, 2, 2, 20, 2, 107, 113, 123, 135, 138, 143, 149, 152, 157, 162, 172, 178, 184, 190, 194, 201, 210, 3, 8, 2, 2] \ No newline at end of file diff --git a/internal/pkg/eql/parser/eql_lexer.go b/internal/pkg/eql/parser/eql_lexer.go index da1bf4d112e..d817439a858 100644 --- a/internal/pkg/eql/parser/eql_lexer.go +++ b/internal/pkg/eql/parser/eql_lexer.go @@ -46,84 +46,85 @@ var serializedLexerAtn = []uint16{ 34, 2, 2, 35, 3, 3, 5, 4, 7, 5, 9, 6, 11, 7, 13, 8, 15, 9, 17, 10, 19, 11, 21, 12, 23, 13, 25, 14, 27, 15, 29, 16, 31, 17, 33, 18, 35, 19, 37, 20, 39, 21, 41, 22, 43, 23, 45, 24, 47, 25, 49, 26, 51, 27, 53, 28, 55, - 29, 57, 30, 59, 31, 61, 32, 63, 33, 65, 34, 67, 35, 3, 2, 10, 3, 2, 47, + 29, 57, 30, 59, 31, 61, 32, 63, 33, 65, 34, 67, 35, 3, 2, 11, 3, 2, 47, 47, 3, 2, 50, 59, 5, 2, 11, 12, 15, 15, 34, 34, 5, 2, 67, 92, 97, 97, 99, - 124, 6, 2, 50, 59, 67, 92, 97, 97, 99, 124, 7, 2, 48, 48, 50, 59, 67, 92, - 97, 97, 99, 124, 5, 2, 12, 12, 15, 15, 41, 41, 5, 2, 12, 12, 15, 15, 36, - 36, 2, 246, 2, 3, 3, 2, 2, 2, 2, 5, 3, 2, 2, 2, 2, 7, 3, 2, 2, 2, 2, 9, - 3, 2, 2, 2, 2, 11, 3, 2, 2, 2, 2, 13, 3, 2, 2, 2, 2, 15, 3, 2, 2, 2, 2, - 17, 3, 2, 2, 2, 2, 19, 3, 2, 2, 2, 2, 21, 3, 2, 2, 2, 2, 23, 3, 2, 2, 2, - 2, 25, 3, 2, 2, 2, 2, 27, 3, 2, 2, 2, 2, 29, 3, 2, 2, 2, 2, 31, 3, 2, 2, - 2, 2, 33, 3, 2, 2, 2, 2, 35, 3, 2, 2, 2, 2, 37, 3, 2, 2, 2, 2, 39, 3, 2, - 2, 2, 2, 41, 3, 2, 2, 2, 2, 43, 3, 2, 2, 2, 2, 45, 3, 2, 2, 2, 2, 47, 3, - 2, 2, 2, 2, 49, 3, 2, 2, 2, 2, 51, 3, 2, 2, 2, 2, 53, 3, 2, 2, 2, 2, 55, - 3, 2, 2, 2, 2, 57, 3, 2, 2, 2, 2, 59, 3, 2, 2, 2, 2, 61, 3, 2, 2, 2, 2, - 63, 3, 2, 2, 2, 2, 65, 3, 2, 2, 2, 2, 67, 3, 2, 2, 2, 3, 69, 3, 2, 2, 2, - 5, 71, 3, 2, 2, 2, 7, 73, 3, 2, 2, 2, 9, 75, 3, 2, 2, 2, 11, 78, 3, 2, - 2, 2, 13, 81, 3, 2, 2, 2, 15, 83, 3, 2, 2, 2, 17, 85, 3, 2, 2, 2, 19, 88, - 3, 2, 2, 2, 21, 91, 3, 2, 2, 2, 23, 93, 3, 2, 2, 2, 25, 95, 3, 2, 2, 2, - 27, 97, 3, 2, 2, 2, 29, 99, 3, 2, 2, 2, 31, 107, 3, 2, 2, 2, 33, 113, 3, - 2, 2, 2, 35, 123, 3, 2, 2, 2, 37, 135, 3, 2, 2, 2, 39, 138, 3, 2, 2, 2, - 41, 152, 3, 2, 2, 2, 43, 160, 3, 2, 2, 2, 45, 172, 3, 2, 2, 2, 47, 174, - 3, 2, 2, 2, 49, 182, 3, 2, 2, 2, 51, 197, 3, 2, 2, 2, 53, 206, 3, 2, 2, - 2, 55, 215, 3, 2, 2, 2, 57, 217, 3, 2, 2, 2, 59, 219, 3, 2, 2, 2, 61, 221, - 3, 2, 2, 2, 63, 223, 3, 2, 2, 2, 65, 225, 3, 2, 2, 2, 67, 227, 3, 2, 2, - 2, 69, 70, 7, 126, 2, 2, 70, 4, 3, 2, 2, 2, 71, 72, 7, 46, 2, 2, 72, 6, - 3, 2, 2, 2, 73, 74, 7, 60, 2, 2, 74, 8, 3, 2, 2, 2, 75, 76, 7, 63, 2, 2, - 76, 77, 7, 63, 2, 2, 77, 10, 3, 2, 2, 2, 78, 79, 7, 35, 2, 2, 79, 80, 7, - 63, 2, 2, 80, 12, 3, 2, 2, 2, 81, 82, 7, 64, 2, 2, 82, 14, 3, 2, 2, 2, - 83, 84, 7, 62, 2, 2, 84, 16, 3, 2, 2, 2, 85, 86, 7, 64, 2, 2, 86, 87, 7, - 63, 2, 2, 87, 18, 3, 2, 2, 2, 88, 89, 7, 62, 2, 2, 89, 90, 7, 63, 2, 2, - 90, 20, 3, 2, 2, 2, 91, 92, 7, 45, 2, 2, 92, 22, 3, 2, 2, 2, 93, 94, 7, - 47, 2, 2, 94, 24, 3, 2, 2, 2, 95, 96, 7, 44, 2, 2, 96, 26, 3, 2, 2, 2, - 97, 98, 7, 49, 2, 2, 98, 28, 3, 2, 2, 2, 99, 100, 7, 39, 2, 2, 100, 30, - 3, 2, 2, 2, 101, 102, 7, 99, 2, 2, 102, 103, 7, 112, 2, 2, 103, 108, 7, - 102, 2, 2, 104, 105, 7, 67, 2, 2, 105, 106, 7, 80, 2, 2, 106, 108, 7, 70, - 2, 2, 107, 101, 3, 2, 2, 2, 107, 104, 3, 2, 2, 2, 108, 32, 3, 2, 2, 2, - 109, 110, 7, 113, 2, 2, 110, 114, 7, 116, 2, 2, 111, 112, 7, 81, 2, 2, - 112, 114, 7, 84, 2, 2, 113, 109, 3, 2, 2, 2, 113, 111, 3, 2, 2, 2, 114, - 34, 3, 2, 2, 2, 115, 116, 7, 118, 2, 2, 116, 117, 7, 116, 2, 2, 117, 118, - 7, 119, 2, 2, 118, 124, 7, 103, 2, 2, 119, 120, 7, 86, 2, 2, 120, 121, - 7, 84, 2, 2, 121, 122, 7, 
87, 2, 2, 122, 124, 7, 71, 2, 2, 123, 115, 3, - 2, 2, 2, 123, 119, 3, 2, 2, 2, 124, 36, 3, 2, 2, 2, 125, 126, 7, 104, 2, - 2, 126, 127, 7, 99, 2, 2, 127, 128, 7, 110, 2, 2, 128, 129, 7, 117, 2, - 2, 129, 136, 7, 103, 2, 2, 130, 131, 7, 72, 2, 2, 131, 132, 7, 67, 2, 2, - 132, 133, 7, 78, 2, 2, 133, 134, 7, 85, 2, 2, 134, 136, 7, 71, 2, 2, 135, - 125, 3, 2, 2, 2, 135, 130, 3, 2, 2, 2, 136, 38, 3, 2, 2, 2, 137, 139, 9, - 2, 2, 2, 138, 137, 3, 2, 2, 2, 138, 139, 3, 2, 2, 2, 139, 141, 3, 2, 2, - 2, 140, 142, 9, 3, 2, 2, 141, 140, 3, 2, 2, 2, 142, 143, 3, 2, 2, 2, 143, - 141, 3, 2, 2, 2, 143, 144, 3, 2, 2, 2, 144, 145, 3, 2, 2, 2, 145, 147, - 7, 48, 2, 2, 146, 148, 9, 3, 2, 2, 147, 146, 3, 2, 2, 2, 148, 149, 3, 2, - 2, 2, 149, 147, 3, 2, 2, 2, 149, 150, 3, 2, 2, 2, 150, 40, 3, 2, 2, 2, - 151, 153, 9, 2, 2, 2, 152, 151, 3, 2, 2, 2, 152, 153, 3, 2, 2, 2, 153, - 155, 3, 2, 2, 2, 154, 156, 9, 3, 2, 2, 155, 154, 3, 2, 2, 2, 156, 157, - 3, 2, 2, 2, 157, 155, 3, 2, 2, 2, 157, 158, 3, 2, 2, 2, 158, 42, 3, 2, - 2, 2, 159, 161, 9, 4, 2, 2, 160, 159, 3, 2, 2, 2, 161, 162, 3, 2, 2, 2, - 162, 160, 3, 2, 2, 2, 162, 163, 3, 2, 2, 2, 163, 164, 3, 2, 2, 2, 164, - 165, 8, 22, 2, 2, 165, 44, 3, 2, 2, 2, 166, 167, 7, 80, 2, 2, 167, 168, - 7, 81, 2, 2, 168, 173, 7, 86, 2, 2, 169, 170, 7, 112, 2, 2, 170, 171, 7, - 113, 2, 2, 171, 173, 7, 118, 2, 2, 172, 166, 3, 2, 2, 2, 172, 169, 3, 2, - 2, 2, 173, 46, 3, 2, 2, 2, 174, 178, 9, 5, 2, 2, 175, 177, 9, 6, 2, 2, - 176, 175, 3, 2, 2, 2, 177, 180, 3, 2, 2, 2, 178, 176, 3, 2, 2, 2, 178, - 179, 3, 2, 2, 2, 179, 48, 3, 2, 2, 2, 180, 178, 3, 2, 2, 2, 181, 183, 9, - 7, 2, 2, 182, 181, 3, 2, 2, 2, 183, 184, 3, 2, 2, 2, 184, 182, 3, 2, 2, - 2, 184, 185, 3, 2, 2, 2, 185, 194, 3, 2, 2, 2, 186, 188, 7, 48, 2, 2, 187, - 189, 9, 6, 2, 2, 188, 187, 3, 2, 2, 2, 189, 190, 3, 2, 2, 2, 190, 188, - 3, 2, 2, 2, 190, 191, 3, 2, 2, 2, 191, 193, 3, 2, 2, 2, 192, 186, 3, 2, - 2, 2, 193, 196, 3, 2, 2, 2, 194, 192, 3, 2, 2, 2, 194, 195, 3, 2, 2, 2, - 195, 50, 3, 2, 2, 2, 196, 194, 3, 2, 2, 2, 197, 201, 7, 41, 2, 2, 198, - 200, 10, 8, 2, 2, 199, 198, 3, 2, 2, 2, 200, 203, 3, 2, 2, 2, 201, 199, - 3, 2, 2, 2, 201, 202, 3, 2, 2, 2, 202, 204, 3, 2, 2, 2, 203, 201, 3, 2, - 2, 2, 204, 205, 7, 41, 2, 2, 205, 52, 3, 2, 2, 2, 206, 210, 7, 36, 2, 2, - 207, 209, 10, 9, 2, 2, 208, 207, 3, 2, 2, 2, 209, 212, 3, 2, 2, 2, 210, - 208, 3, 2, 2, 2, 210, 211, 3, 2, 2, 2, 211, 213, 3, 2, 2, 2, 212, 210, - 3, 2, 2, 2, 213, 214, 7, 36, 2, 2, 214, 54, 3, 2, 2, 2, 215, 216, 7, 42, - 2, 2, 216, 56, 3, 2, 2, 2, 217, 218, 7, 43, 2, 2, 218, 58, 3, 2, 2, 2, - 219, 220, 7, 93, 2, 2, 220, 60, 3, 2, 2, 2, 221, 222, 7, 95, 2, 2, 222, - 62, 3, 2, 2, 2, 223, 224, 7, 125, 2, 2, 224, 64, 3, 2, 2, 2, 225, 226, - 7, 127, 2, 2, 226, 66, 3, 2, 2, 2, 227, 228, 7, 38, 2, 2, 228, 229, 7, - 125, 2, 2, 229, 68, 3, 2, 2, 2, 20, 2, 107, 113, 123, 135, 138, 143, 149, - 152, 157, 162, 172, 178, 184, 190, 194, 201, 210, 3, 8, 2, 2, + 124, 6, 2, 50, 59, 67, 92, 97, 97, 99, 124, 7, 2, 47, 48, 50, 59, 67, 92, + 97, 97, 99, 124, 7, 2, 47, 47, 50, 59, 67, 92, 97, 97, 99, 124, 5, 2, 12, + 12, 15, 15, 41, 41, 5, 2, 12, 12, 15, 15, 36, 36, 2, 246, 2, 3, 3, 2, 2, + 2, 2, 5, 3, 2, 2, 2, 2, 7, 3, 2, 2, 2, 2, 9, 3, 2, 2, 2, 2, 11, 3, 2, 2, + 2, 2, 13, 3, 2, 2, 2, 2, 15, 3, 2, 2, 2, 2, 17, 3, 2, 2, 2, 2, 19, 3, 2, + 2, 2, 2, 21, 3, 2, 2, 2, 2, 23, 3, 2, 2, 2, 2, 25, 3, 2, 2, 2, 2, 27, 3, + 2, 2, 2, 2, 29, 3, 2, 2, 2, 2, 31, 3, 2, 2, 2, 2, 33, 3, 2, 2, 2, 2, 35, + 3, 2, 2, 2, 2, 37, 3, 2, 2, 2, 2, 39, 3, 2, 2, 2, 2, 41, 3, 2, 2, 2, 2, + 43, 3, 2, 2, 2, 2, 45, 3, 
2, 2, 2, 2, 47, 3, 2, 2, 2, 2, 49, 3, 2, 2, 2, + 2, 51, 3, 2, 2, 2, 2, 53, 3, 2, 2, 2, 2, 55, 3, 2, 2, 2, 2, 57, 3, 2, 2, + 2, 2, 59, 3, 2, 2, 2, 2, 61, 3, 2, 2, 2, 2, 63, 3, 2, 2, 2, 2, 65, 3, 2, + 2, 2, 2, 67, 3, 2, 2, 2, 3, 69, 3, 2, 2, 2, 5, 71, 3, 2, 2, 2, 7, 73, 3, + 2, 2, 2, 9, 75, 3, 2, 2, 2, 11, 78, 3, 2, 2, 2, 13, 81, 3, 2, 2, 2, 15, + 83, 3, 2, 2, 2, 17, 85, 3, 2, 2, 2, 19, 88, 3, 2, 2, 2, 21, 91, 3, 2, 2, + 2, 23, 93, 3, 2, 2, 2, 25, 95, 3, 2, 2, 2, 27, 97, 3, 2, 2, 2, 29, 99, + 3, 2, 2, 2, 31, 107, 3, 2, 2, 2, 33, 113, 3, 2, 2, 2, 35, 123, 3, 2, 2, + 2, 37, 135, 3, 2, 2, 2, 39, 138, 3, 2, 2, 2, 41, 152, 3, 2, 2, 2, 43, 160, + 3, 2, 2, 2, 45, 172, 3, 2, 2, 2, 47, 174, 3, 2, 2, 2, 49, 182, 3, 2, 2, + 2, 51, 197, 3, 2, 2, 2, 53, 206, 3, 2, 2, 2, 55, 215, 3, 2, 2, 2, 57, 217, + 3, 2, 2, 2, 59, 219, 3, 2, 2, 2, 61, 221, 3, 2, 2, 2, 63, 223, 3, 2, 2, + 2, 65, 225, 3, 2, 2, 2, 67, 227, 3, 2, 2, 2, 69, 70, 7, 126, 2, 2, 70, + 4, 3, 2, 2, 2, 71, 72, 7, 46, 2, 2, 72, 6, 3, 2, 2, 2, 73, 74, 7, 60, 2, + 2, 74, 8, 3, 2, 2, 2, 75, 76, 7, 63, 2, 2, 76, 77, 7, 63, 2, 2, 77, 10, + 3, 2, 2, 2, 78, 79, 7, 35, 2, 2, 79, 80, 7, 63, 2, 2, 80, 12, 3, 2, 2, + 2, 81, 82, 7, 64, 2, 2, 82, 14, 3, 2, 2, 2, 83, 84, 7, 62, 2, 2, 84, 16, + 3, 2, 2, 2, 85, 86, 7, 64, 2, 2, 86, 87, 7, 63, 2, 2, 87, 18, 3, 2, 2, + 2, 88, 89, 7, 62, 2, 2, 89, 90, 7, 63, 2, 2, 90, 20, 3, 2, 2, 2, 91, 92, + 7, 45, 2, 2, 92, 22, 3, 2, 2, 2, 93, 94, 7, 47, 2, 2, 94, 24, 3, 2, 2, + 2, 95, 96, 7, 44, 2, 2, 96, 26, 3, 2, 2, 2, 97, 98, 7, 49, 2, 2, 98, 28, + 3, 2, 2, 2, 99, 100, 7, 39, 2, 2, 100, 30, 3, 2, 2, 2, 101, 102, 7, 99, + 2, 2, 102, 103, 7, 112, 2, 2, 103, 108, 7, 102, 2, 2, 104, 105, 7, 67, + 2, 2, 105, 106, 7, 80, 2, 2, 106, 108, 7, 70, 2, 2, 107, 101, 3, 2, 2, + 2, 107, 104, 3, 2, 2, 2, 108, 32, 3, 2, 2, 2, 109, 110, 7, 113, 2, 2, 110, + 114, 7, 116, 2, 2, 111, 112, 7, 81, 2, 2, 112, 114, 7, 84, 2, 2, 113, 109, + 3, 2, 2, 2, 113, 111, 3, 2, 2, 2, 114, 34, 3, 2, 2, 2, 115, 116, 7, 118, + 2, 2, 116, 117, 7, 116, 2, 2, 117, 118, 7, 119, 2, 2, 118, 124, 7, 103, + 2, 2, 119, 120, 7, 86, 2, 2, 120, 121, 7, 84, 2, 2, 121, 122, 7, 87, 2, + 2, 122, 124, 7, 71, 2, 2, 123, 115, 3, 2, 2, 2, 123, 119, 3, 2, 2, 2, 124, + 36, 3, 2, 2, 2, 125, 126, 7, 104, 2, 2, 126, 127, 7, 99, 2, 2, 127, 128, + 7, 110, 2, 2, 128, 129, 7, 117, 2, 2, 129, 136, 7, 103, 2, 2, 130, 131, + 7, 72, 2, 2, 131, 132, 7, 67, 2, 2, 132, 133, 7, 78, 2, 2, 133, 134, 7, + 85, 2, 2, 134, 136, 7, 71, 2, 2, 135, 125, 3, 2, 2, 2, 135, 130, 3, 2, + 2, 2, 136, 38, 3, 2, 2, 2, 137, 139, 9, 2, 2, 2, 138, 137, 3, 2, 2, 2, + 138, 139, 3, 2, 2, 2, 139, 141, 3, 2, 2, 2, 140, 142, 9, 3, 2, 2, 141, + 140, 3, 2, 2, 2, 142, 143, 3, 2, 2, 2, 143, 141, 3, 2, 2, 2, 143, 144, + 3, 2, 2, 2, 144, 145, 3, 2, 2, 2, 145, 147, 7, 48, 2, 2, 146, 148, 9, 3, + 2, 2, 147, 146, 3, 2, 2, 2, 148, 149, 3, 2, 2, 2, 149, 147, 3, 2, 2, 2, + 149, 150, 3, 2, 2, 2, 150, 40, 3, 2, 2, 2, 151, 153, 9, 2, 2, 2, 152, 151, + 3, 2, 2, 2, 152, 153, 3, 2, 2, 2, 153, 155, 3, 2, 2, 2, 154, 156, 9, 3, + 2, 2, 155, 154, 3, 2, 2, 2, 156, 157, 3, 2, 2, 2, 157, 155, 3, 2, 2, 2, + 157, 158, 3, 2, 2, 2, 158, 42, 3, 2, 2, 2, 159, 161, 9, 4, 2, 2, 160, 159, + 3, 2, 2, 2, 161, 162, 3, 2, 2, 2, 162, 160, 3, 2, 2, 2, 162, 163, 3, 2, + 2, 2, 163, 164, 3, 2, 2, 2, 164, 165, 8, 22, 2, 2, 165, 44, 3, 2, 2, 2, + 166, 167, 7, 80, 2, 2, 167, 168, 7, 81, 2, 2, 168, 173, 7, 86, 2, 2, 169, + 170, 7, 112, 2, 2, 170, 171, 7, 113, 2, 2, 171, 173, 7, 118, 2, 2, 172, + 166, 3, 2, 2, 2, 172, 169, 3, 2, 2, 2, 173, 46, 3, 2, 2, 2, 174, 178, 9, + 5, 
2, 2, 175, 177, 9, 6, 2, 2, 176, 175, 3, 2, 2, 2, 177, 180, 3, 2, 2, + 2, 178, 176, 3, 2, 2, 2, 178, 179, 3, 2, 2, 2, 179, 48, 3, 2, 2, 2, 180, + 178, 3, 2, 2, 2, 181, 183, 9, 7, 2, 2, 182, 181, 3, 2, 2, 2, 183, 184, + 3, 2, 2, 2, 184, 182, 3, 2, 2, 2, 184, 185, 3, 2, 2, 2, 185, 194, 3, 2, + 2, 2, 186, 188, 7, 48, 2, 2, 187, 189, 9, 8, 2, 2, 188, 187, 3, 2, 2, 2, + 189, 190, 3, 2, 2, 2, 190, 188, 3, 2, 2, 2, 190, 191, 3, 2, 2, 2, 191, + 193, 3, 2, 2, 2, 192, 186, 3, 2, 2, 2, 193, 196, 3, 2, 2, 2, 194, 192, + 3, 2, 2, 2, 194, 195, 3, 2, 2, 2, 195, 50, 3, 2, 2, 2, 196, 194, 3, 2, + 2, 2, 197, 201, 7, 41, 2, 2, 198, 200, 10, 9, 2, 2, 199, 198, 3, 2, 2, + 2, 200, 203, 3, 2, 2, 2, 201, 199, 3, 2, 2, 2, 201, 202, 3, 2, 2, 2, 202, + 204, 3, 2, 2, 2, 203, 201, 3, 2, 2, 2, 204, 205, 7, 41, 2, 2, 205, 52, + 3, 2, 2, 2, 206, 210, 7, 36, 2, 2, 207, 209, 10, 10, 2, 2, 208, 207, 3, + 2, 2, 2, 209, 212, 3, 2, 2, 2, 210, 208, 3, 2, 2, 2, 210, 211, 3, 2, 2, + 2, 211, 213, 3, 2, 2, 2, 212, 210, 3, 2, 2, 2, 213, 214, 7, 36, 2, 2, 214, + 54, 3, 2, 2, 2, 215, 216, 7, 42, 2, 2, 216, 56, 3, 2, 2, 2, 217, 218, 7, + 43, 2, 2, 218, 58, 3, 2, 2, 2, 219, 220, 7, 93, 2, 2, 220, 60, 3, 2, 2, + 2, 221, 222, 7, 95, 2, 2, 222, 62, 3, 2, 2, 2, 223, 224, 7, 125, 2, 2, + 224, 64, 3, 2, 2, 2, 225, 226, 7, 127, 2, 2, 226, 66, 3, 2, 2, 2, 227, + 228, 7, 38, 2, 2, 228, 229, 7, 125, 2, 2, 229, 68, 3, 2, 2, 2, 20, 2, 107, + 113, 123, 135, 138, 143, 149, 152, 157, 162, 172, 178, 184, 190, 194, 201, + 210, 3, 8, 2, 2, } var lexerDeserializer = antlr.NewATNDeserializer(nil) From 0560b466d838d07b2565536cd96e2cfa98165e89 Mon Sep 17 00:00:00 2001 From: Michel Laterman <82832767+michel-laterman@users.noreply.github.com> Date: Wed, 13 Jul 2022 12:41:20 -0700 Subject: [PATCH 044/180] Fix transpiler to allow : in dynamic variables. (#680) Fix transpiler regex to allow ':' characters in dynamic variables so that users can input "${dynamic.lookup|'fallback.here'}". Co-authored-by: Aleksandr Maus --- CHANGELOG.next.asciidoc | 1 + internal/pkg/agent/transpiler/vars.go | 2 +- internal/pkg/agent/transpiler/vars_test.go | 12 ++++++++++++ 3 files changed, 14 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index c9e71a035b7..06b85fa7e96 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -107,6 +107,7 @@ - Collects stdout and stderr of applications run as a process and logs them. {issue}[88] - Remove VerificationMode option to empty string. Default value is `full`. {issue}[184] - diagnostics collect file mod times are set. {pull}570[570] +- Allow ':' characters in dynamic variables {issue}624[624] {pull}680[680] - Allow the - char to appear as part of variable names in eql expressions. {issue}709[709] {pull}710[710] ==== New features diff --git a/internal/pkg/agent/transpiler/vars.go b/internal/pkg/agent/transpiler/vars.go index 8daacf606fe..a9f96b15ee8 100644 --- a/internal/pkg/agent/transpiler/vars.go +++ b/internal/pkg/agent/transpiler/vars.go @@ -14,7 +14,7 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/core/composable" ) -var varsRegex = regexp.MustCompile(`\${([\p{L}\d\s\\\-_|.'"]*)}`) +var varsRegex = regexp.MustCompile(`\${([\p{L}\d\s\\\-_|.'":]*)}`) // ErrNoMatch is return when the replace didn't fail, just that no vars match to perform the replace. 
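// Illustrative sketch, not part of the applied diff: a minimal standalone
// check of the pattern change above, assuming only the Go standard library.
// The input string is the `${un-der_score.missing|'with:colon'}` case added
// to vars_test.go in this same commit; before ':' was added to the character
// class the expression failed to match at all, now the full variable body is
// captured so the fallback constant survives substitution.
//
//	package main
//
//	import (
//		"fmt"
//		"regexp"
//	)
//
//	func main() {
//		re := regexp.MustCompile(`\${([\p{L}\d\s\\\-_|.'":]*)}`)
//		m := re.FindStringSubmatch(`${un-der_score.missing|'with:colon'}`)
//		fmt.Println(m[1]) // un-der_score.missing|'with:colon'
//	}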
var ErrNoMatch = fmt.Errorf("no matching vars") diff --git a/internal/pkg/agent/transpiler/vars_test.go b/internal/pkg/agent/transpiler/vars_test.go index 8171a4b7a9f..142ab132109 100644 --- a/internal/pkg/agent/transpiler/vars_test.go +++ b/internal/pkg/agent/transpiler/vars_test.go @@ -81,12 +81,24 @@ func TestVars_Replace(t *testing.T) { false, false, }, + { + `${"with:colon"}`, + NewStrVal("with:colon"), + false, + false, + }, { `${"direct"}`, NewStrVal("direct"), false, false, }, + { + `${un-der_score.missing|'with:colon'}`, + NewStrVal("with:colon"), + false, + false, + }, { `${un-der_score.}`, NewStrVal(""), From 4dcc16b82ea10bbafde1bd88658d9380e1194e3d Mon Sep 17 00:00:00 2001 From: Craig MacKenzie Date: Thu, 14 Jul 2022 11:25:15 -0400 Subject: [PATCH 045/180] Fix for the filebeat spec file picking up packetbeat inputs (#700) * Reproduce filebeat picking up packetbeat inputs * Filebeat: filter inputs as first input transform. Move input filtering to be the first input transformation that occurs in the filebeat spec file. Fixes https://github.com/elastic/elastic-agent/issues/427. * Update changelog. --- CHANGELOG.next.asciidoc | 1 + internal/pkg/agent/program/supported.go | 2 +- .../testdata/single_config-packetbeat.yml | 7 ++ .../agent/program/testdata/single_config.yml | 7 ++ internal/spec/filebeat.yml | 66 +++++++++++-------- 5 files changed, 54 insertions(+), 29 deletions(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 06b85fa7e96..33aa1756f0d 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -193,3 +193,4 @@ - Add `@metadata.input_id` and `@metadata.stream_id` when applying the inject stream processor {pull}527[527] - Add liveness endpoint, allow fleet-gateway component to report degraded state, add update time and messages to status output. {issue}390[390] {pull}569[569] - Redact sensitive information on diagnostics collect command. {issue}[241] {pull}[566] +- Fix incorrectly creating a filebeat redis input when a policy contains a packetbeat redis input. 
{issue}[427] {pull}[700] diff --git a/internal/pkg/agent/program/supported.go b/internal/pkg/agent/program/supported.go index fa35f4a4fe3..985ebb03709 100644 --- a/internal/pkg/agent/program/supported.go +++ b/internal/pkg/agent/program/supported.go @@ -27,7 +27,7 @@ func init() { // internal/spec/metricbeat.yml // internal/spec/osquerybeat.yml // internal/spec/packetbeat.yml - unpacked := packer.MustUnpack("eJzce0mTqziX9v77GbX9ursY0llFR7wLQyaTnWQZXyOhHZKcYFtgV9rYxh393zskBgMmx7r19hu9yIh7sdBwdIbnPOfwX7+sssPyNYvYr/vdkvwa7dJ/3y9fj8vX/yhS9st//oJT84B+bONZoHvTwGMkQ4zEuzUGs3vHMk94Ll8QdBUEnUkIXSkCKAnVwd8yctnG4LSNHcM5+HNn7xjuIQSjBCnBAYGRNE2DPATuHoGZRm1XRnNnb6zGsbOSTWd1ip2UrqGqM5J6DGczzbUP+uJR/uEHLvAD98WXNHt22Z6fHnTNiXfUSIM7YmkFtYINVGRGbXcXqk/3jrmfOMZ4FUL9MIXVmVbO3mDShGTBHsGne77udK6vsaqPoOofoXLeEXUmnjvGOHYsJiEg3TsW2iMQSM1z2z8+r/QdznSZ2k8T8cwYx1gZvYSKlqP0vCvlMzpidcx/PziWnJCHbTOWWKYUPWxjlJ4ZgrPr89be6mfTuV4gIB9pGrxESjB6jrfNb+Wf/orght/HOlSCC5G1hFhMjP3WPLbLSpmyHJ3aY6SYpMEBq4hB5cCWP67nqf/EvCud33dOx1vxDkrZHVQ9iaRBgn9s46UqVTJBO2z7jDBNCcFZ7pzb9hi2gjW1tGJI1tU60hLq7PoOSrAdMHLp7Osg9HTW7GVPraC4nl2/IHBmoeofSXYj95t1y/k0mdq6XJ7vKpvWXR4ci+VRGqypqW0RMDcIupfnlf7by2ynRlaQP6/0PQKjjFrx1rUP1TqeNpmP/7/zMI5DMNo4VpIQ6cCW83izVKo1bWnvGJRhy7xQi62JEiQk9bZucYpd1WXIYhe3OPE9ZJFippHymE2NcYYtLSOqnxAlziaz7T9++be+V8jp6oCX0aHvFGCwiaC/QsCUjNQ7oIdtHHaemQVqGdwUPK2mwvCuY6ZpsA+hJ0Xg6RACyg07XwJ576xO5TuLhXiHKIFE4TiPwPnQdhQoNfdEWaym445juSDoy8QYSQjIJ2yZEpqPGE7NFbaCzR+AK4PH+mtgYOZiDyDIqdEaD/2Cgps19lihWQRG2TQ9M5oG+z+Az8IsyPrzIsV7IVZQhFzxH6TNUjID/5EtZptAD0zN/iHRh+f14+nJlvprJBT4O2q5LISzvHQgLJum8hHZwhnkCIySkCvXg5yG4HxBc2cCZ7uEZP4Opeaa8jtJg4TaT8fO3WQuE5ee+S/cOWE1kIgdSFDxtiEYZfx+hROebVtOcdhgjdVTPAPnvfNoyshikljPqAzEKA2RFPoKAbrDK12NoL91DFqezfg9m8bbAYP3XqjCpMjUCgQoW9rjygBrR+SPiLUojc922fNqnMG0XH9a3L26lXMiSrBHwJOw6txzw+NnJKftxCl0HVn+xTHoDqc+Wz5sY2EIhfwbskwpDLQLtV0WAikX/4ceo7Z0WCp+Qi3zhah+gYB5mKajBIPgQixzjaCU8QDgWEESKnEcgdGJwlnO5RaB0Z98H1BhObKCu9rJUJuduKzF2u192b6KCz1Blv+CUsaweKYXWPEYUT3ukLgTOuJ0EaNUKxwrUMogKvZ3QXBWOQotR3M9xarD596E0E+a+5uPxP+5bU2Nm9/EvU0NPcHpLO7LRPgA6J1C4LHy/hqnVo2RX4jtHkXQUbTGqd4GQpYTJSioqSUo8xl5Wy4PWBkpCLpSfY9AlvL6Hoh8oPw5tX6v5O5f+O9CZ2EiEW4LpraPoLgD7hcu9Z7qgFu/V+nZjlrB4Xml1+epn0v40horzt4K4LaeUCtu69pQ8F47j+XvpR4HF8dCR7LS9Za9c7vNInAXCzswxllpv7OjW9zFPkSMZEyKeCDmesJlt9KlyDK5fC7NOraQj4TALA7hU0ythDlW5T/meh4Cmd9fHdS4nHKinBNqBW/YXxekiHuatYLdjS3G+aTYZO13xbwP29h9eJy0wIPYSwd4WKjAitS1deN6BxhoGwrOrCXT1LEe48ZeDX1HCv1CrGAdAbQTMlC0nKZBwf1SJSuhM65S243PgzF2BcCTqVvcNXvEwHxFgZaQzE26AKfRlcaeOue1fr93rOv81W8HLisMNAUFmvBp7XcqENOf7zPvNHHrq+s0tj/bfgwKLQHyhDz5PUdAy5t7MF1GYLAj6SIOeVywvCNO0Q4VuoQLfY0VmTm2vyWpJiMlLnXy0cy4/3KMJMeFvseKl2BDTyNwZqTY5F84SxlXTK3gugEVtMNWUNnpW6CyBfTWN36gAtCVzVSyoam5pyC4tGTCwaTQeTRuPxPrv0SAYxFaRNBvAW8pJiq7cN0x4u3aeTQ3yH6KqU2PjmVukKmlEQj2/Fnbl5JCAM3ar8c1borAoM9vACFUShlPU7aZGgPPMy8PoX6JLE0a/p37HG+HlbteTCt/F7Et/f3esQ9aKW/vSOz6jvSUpNrhBrCXCVMb50z43H1/gDL3yPFGFzt2fWxpZw02mlR4pQPqr3jAbe5uOu+u39h7hYka3eM6D8W7rLyvKuYZegPka39S+tkKmBtc50dpBKhMUlPYSi9m9hKB7YQqCcNrHv+CHKn+dmL4v3XPXp/zLyUGIhkgKZ/3SSQDX0oMCNvmdCgxUM5HCnqJQedZLzH4lwL5O/p5cNwD2u0zxjvhgKqMUziCZQOOpNJIVlUmme648+FOYoNg3Mp0ReC7d6zzEalcobvja/AD1RYwXG/fYR2GwZZTgXSeeWKFcmcpgCi2Tp27rEHNFbS4DCksd2y6o1Ycu4q3x2qwKRMAklf75U5cQtB9qZ0mVPn45IWkQYZgcmpl3Lfgnhtturh3DLpsy3e62mYfMQJ9eZWOUsg55QCiTF7ae3QGWYZb1qDreG4cRvxPyeRjVzkkKD0k5b+5jngc+GzdYjO5MdZlRnfbVda3VR+MNsTSdjibxQslWFPo7qi9mYSKvCnBhH/CCsupIV8Q8GSSMmnZ03t0neOArEAxMsGq7UJlce88hOrzQzwJgSccHw8SwpbUUoeehSMMcmTrRx4kjfR8RLJ2CqG/LeXOwYmrRuDu3jGc4w+LrUhqFsu5ZtbymUrX96dCBj6bcnkUWmv/0p9TPnfh8Dn3ERjJPOlzVtqR2LOjD84JUf1dWGjm9R3tQoVf0fZYIcf2OSerEX+24vpCRVKnqeSyjZ3N0z0
0zzOSahlJzYPzWAIQaJ6b/Yp/12uYZ8J9DbUCArmdy2cyuE7qbRHwXoX8VD/B1um+Sm5YKGsCKPUTGidtyQV6LFQDAUCcalzFyjYBz+EBPmXpcu5cn62kAw8EThMsxyui+jxYFfUzarEDAprMdeHpMp4QS7tQk+/fkzgArO74DgGPJ7N79KOXLBnusJ7V+7DKeNEEYsNt5m7vazqXmzupxl2o5TOSOa1nzmEKgxNS3QRZi95zlxFFk0nqMVK0ZPCGHLvjR/cRHK/qJDECMuP+4Hk1Vp4exhNiuwyqQR6BEdepPX7YTqZznS2tYF2C1EUNlITuP6/Gq7YekKtt1mskJKVdAGq4Mk4b/Vi1gWb/HoflM7DvD8mfK8iCao9lfY+prRKIdgLZB9VcPo1ejIW8bmLJLbBzbpLv4cRI7zPLB54sU7AQZ8LAPPXtqcPI266Mrc5eP0yWROxNg6KTjKxvZdW2yW4VQOolJ7eMs9gHRDuisCOOh4FkOaffi0NnhlMqRQaPQ5X8VGnnPNzFTxUJFFnmZa4EIz5HHete5jz21CSud0HALEIlHgCQOw6COf5JuB1xX4lTTXL4eqor48zfYbDIQ+iuI1uK//ghxa5iFvhHKLlFub5rHwoKRkJHpylKMGD7JazGiriYcNxRnsfwfyNZkAvfNB8dQrA7kqwaeyHZZD6+jZEvK7YcwLM+91mgTA6quCj8rcA5411physdd6pVmceoHZymKdvjT+DSGvvyZJBY54RaiyFyt1cRkxOc8uRLFglga7xEsuAW+1paxvEaKkZ7BBHDD/IGAVdGxYeVNmu+OJufIKyvcjFGVdJD8pqoIcWAHESlziyoxVKOEZ5XusB3pNDaMi4Q5HHdlaDCMUSl86a2jiwzRxxnVLb7kb96G2/fYOC/ndSczvVDCMc9v99PkG/8TFmVVMw9NjUJyyWp2fE3ZVLdIy4/rETWGJwtLY8Re8bzs1VDsBbifpv7K3W8Q0iv4Ky3x6HKKffH0Gf8Pjqkxum7+29kvUIAHUm6eJdA6JE7HTK3Oeupis22X4SC4OBn8F/Evq+kUe++vD5R/HcSyCJWhWB0EXlvl4ys99WtgHLZAfNEOhXO6jn3CyUuE/9uv8P1gaY8V64wprAP0q3u1sUH1ZOQxXJxZsvf4d46wudksyO1vBPfX9jRu/Y8wYY8bGMK/FNvLyUOt7R1pAQcU2yw4r0i6PTmEdi/QMDfEVm7YEtT+bmeV3r57HR7/qnqjQjPGS5bQcq15PAZ0lEmSvByQxj9jcTrewWZb6/fKgj9ZcK0U6DrV9+lOIT0MgUlX0AVUwqVuPsbfGphHPOeQpdNQakfkR1ccbtVzoFSTcapXyzbPkgZSSFg+VU/pLjWnev7dE0M+USBe6HgOi6yfYmYWht/1vNuqe2fosw74qtP/hOrQYFTc4+u415DgF7D635adiPFGJpSa84cQT/lenrd1+8Xbx2UxGQb27X0dzrv2hn//xJ6nXW4rV31IdiQ61w8V7i7/kZ3mOfH17lL8r3yiX+dQG/4vDd1oi44f8zZSHGoaKemCFMXiqwhQljk3xUn9xXcfcV8nySEv0zottbIa5m9zDfxH6vxyeG4xtC3IfSmCG62rn04UujzMRrH1qjmK1SPCcxskL1j0FI/CiLw3Jf4oRe2XB6Gu7/8MgeJF3X+UnG6VW5yaPKaK6/Lc/Gy08eQD1jxmcOkXodY2TnVdGB9hi+Nu506N/nmB3ngW/ziQDGnnXsdWnnbz1m/bwvv7KG2iXf9eRUTrvxjuc96L1ARRYm3uM0YFHqKrYBRY1R37OX1XNcGnSa3i2FT8G2wRqWbs5pPrf2BKHzhQfkI28CNHmR1J97ohBWO7TaioDWwVo2P8yejGXvlesU8/guygjSEwZ7aw51jt5zuzT62WPWkXtfXjZxEN9xwt1de68005fIv4wNU9SPJZh+tfSHK6aYjsPYR0/U47+tkm69oyarcb7M/vW1j7fW7PrH1N1Qw6/5JdQHy5t1y7uRI1EYvWvnO21j3zX2+g2fadkChx5occfytLsVBXPaX5piPRD5CVD0JlcW3zvVmY9L3znjNS+JvdUD2dGs8xE/VY/v+4ZqDW0GCrED4IRHbM7pF4O6+y0eV9tGLjT+5tjIQE5Nl9DrU+Ti3goRk3QJn1HnWCoSfLW5+gwz6Siv1pzoXb4I0Byf+ZaokHGRI3ydjKkAJk6ZoWQGj4YAqa6JLEKouQ0pw126PHm7vdXliuiQqd7QJExdb/J5PTgOK3AOo75HK7wPblmHWd3N1tLdty3WHVRawzxQm33LMbzvkj42v65hb4HXQ6H4OCdx0FwjZ0DIpmguwKog2apmXyCA7I/7HbYdBujy8rsiABf4AgURStq40svogoWryVyqqdvijg7ptYIct6WOKtaZxM59hqO9FafHj1oQvfthwPiKFp18kx4ICOWnIClYUkP68WShrJwTdNZ/3j7n/249FsFhs2MMnqNkDgn4RAa/qH6l7skYcnguKcehcxpseoStrkgb8JgtqakfMtDpVeQmVJMEp5dZZan7W0JlvtER8ole2uvuKpvtcO4TFPZU48z4qaeYGKoYASQ1F0LRcNB+M/Cw68R2q94Y67MGfKt0NtARb5xdqaS/YYhf6sL3piWyt2YIPN2fPsaJ1qDUEkzWCuiQgeVZBFlj260dgVvax17ZiCH06kVRbI+hduCfikaO312Fquv5wptvL/N1zXO8wDdJrGh7cUdtNuE4IOZmaoGKvZcke7FSb8066e+w9733cM1j6fCtyDZQ7RVuLeZdPCq22zYs7fr9v/X+7BNrT9xs95XLHmSf85NttPq07uO3tLbiNw1X86+LhLKL8H6u718n8VkblPKL16N4x/DYiKCF1GSvac9c9iDfUeKd38ErZHUJwYFAxC5Kao0E9bvxED4aXutLs+Yt9v633ZpOPYf1btMJn3ul+z/BTqOPvzzGQIn3iDHZwQYHWSjF+am9qJ26Kd/u61dCPnZiYD8WGphTasWXnlkbNWjbZwzW1XN6lIzsySFgIfMbnLM8/2J7Wwwf/Av2l2/2f+fK1GIJ/qnemICiW3ar8kaimjKA76lfmv1CV/zr0+0p36aeq8e73O1CrT71wXz7vdt9pFwIDRrLN5HtdZvX7rOwS/Wx3me3vMAiOFM7unYfHU7tj9o1ksc2w159t9WBb3SnbUv/avQRaC7I0JsfhquhUwBYTMoyAYPe460h5aH+7O+Dvq4419/fJasgn29x736z+0ztWP/j29LZDqNvxw5PKRHOMR80xyOX5IcwGk8ZdRDbLIdpmYZnrSAmkTtJo82BzYHXzf5M0FuTgl2nqBwkjH3Mz9t2edCGVQjbLusz7PTzdsW8mitlbH32S7pm/T938RYqk+6HYm/TIKQTeKxrgCf+P9W5/9BX25Jf//n//EwAA//82Bxyl") + unpacked := 
packer.MustUnpack("eJzce1uTqkiX9v33M/r2m5nmUFZvJuK9EKpA0KJa3JJJ3pGZFqgJ2iWoODH/fSKTg4DUsXv6fWMuKmLvFPKwch2e9azFf/2yTrPVaxqyXw/7Ffk13Cf/fli9Hlev/1Ek7Jf//AUnZoZ+7qK5r7sz32UkRYxE+w0G83vbMk94IV8QdBQE7WkAHSkEKA7Uwd9SctlF4LSLbMPOvIV9sA0nC8AoRoqfITCSZomfB8A5IDDX6MSR0cI+GOtxZK9l016fIjuhG6jqjCQuw+lccyaZvnyUf3q+AzzfefEkbTK/7M5PD7pmR3tqJP4dsbSCWv4WKjKjE2cfqE/3tnmY2sZ4HUA9m8HqTGv7YDBpSlL/gODTPV93ttA3WNVHUPWOUDnviToX47YxjmyLSQhI97aFDgj4UjM+8Y7Pa32PU12mk6epGDPGEVZGL4Gi5Sg570v5jI5YHfPfM9uSY/Kwa54llimFD7sIJWeG4Pw63tpbPTZb6AUC8pEm/kuo+KPnaNf8Vv7prwhu+X1sAsW/EFmLicXEs9+aZ+KwUqYsR6f2M1JEEj/DKmJQydjq5/U89Z+Yd63z+87peCfeQQm7g6orkcSP8c9dtFKlSiZojyceI0xTAnCWO+eeuAxb/oZaWjEk62odaQV1dn0HxXjiM3Lp7CsTejpv9nKgll9cz65fEDizQPWOJL2R+8265XyaTCe6XJ7vKpvWXWa2xfIw8TfU1HYImFsEncvzWv/tZb5XQ8vPn9f6AYFRSq1o50yyah1Xmy7G/99+GEcBGG1tK46JlLHVItqulGrNiXSwDcqwZV6oxTZE8WOSuDunOEWO6jBksYtTnPge0lAxk1B5TGfGOMWWlhLVi4kSpdP57h+//FvfK+R0neFVmPWdAvS3IfTWCJiSkbgZethFQWfMLFDL4GbgaT0Thnd9Zpb4hwC6UgiesgBQbtj5CsgHe30q31kuxTtE8SUKx3kIzlnbUaDEPBBluZ6NO47lgqAnE2MkISCfsGVKaDFiODHX2PK3vwOuDC7rr4GBmYs9AD+nRut56BUU3KxxwApNQzBKZ8mZ0cQ//A48FqR+2p8XKe4Lsfwi4Ir/IG1Xkul7j2w53/q6b2qTnxJ9eN48np4mUn+NmAJvTy2HBXCelw6EpbNEPqKJcAY5AqM44Mr1ICcBOF/Qwp7C+T4mqbdHibmh/E4SP6aTp2PnblKHiUtPvRfunLDqS2TiS1BxdwEYpfx+hROe71pOcdhgjfVTNAfng/1oyshikljPqAzEKA2RFPoaAbrHa10NobezDVqezfiRzqLdgMG7L1RhUmhqBQKUrSbjygBrR+SNiLUsjW/isOf1OIVJuf6suHt1KudEFP+AgCth1b7nhsfPSE67qV3oOrK8i23QPU48tnrYRcIQCvk3ZJlS4GsXOnFYAKRc/B+6jE6kbKV4MbXMF6J6BQJmNktGMQb+hVjmBkEp5QHAtvw4UKIoBKMThfOcyy0Eoz/4PqDCcmT5d7WToRN24rIWa7f3NfFUXOgxsrwXlDCGxZheYMVlRHW5Q+JO6IiTZYQSrbAtXymDqNjfBcF55Si0HC30BKs2n3sbQC9u7m8xEv/ntjUzbn4T9zYz9Bgn86gvE+EDoHsKgMvK+2ucWvWM/EImzlEEHUVrnOptIGQ5UfyCmlqMUo+Rt+XygJWRgqAj1fcIZCmv74HIGeXj1PpRyd278N+FzsJYItwWTO0QQnEH3C9c6j3VAbd+r9KzPbX87Hmt1+epxyV8aT0rzt4K4BM9plbU1rWh4L2xH8vfSz32L7aFjmSt6y1753abhuAuEnZgjNPSfudHp7iLPIgYSZkU8kDM9YTLbq1LoWVy+VyadSZCPhIC8yiATxG1YmZblf9Y6HkAZH5/dVDjcsqJco6p5b9hf12QIu5p3gp2N7YY5dNim7bfFfM+7CLn4XHaAg9iLx3gYaECK1LX1o3rHWCgbSk4s5ZME9t6jBp7NfQ9KfQLsfxNCNBeyEDRcpr4BfdLlayEzjhKbTceD8bYEQBPpk5x1+wRA/MV+VpMUifuApxGVxp76pzX+nFvW9f5q98yLisMNAX5mvBp7XcqENOf7zPvNHHrq+s0tj/ffQwKLQHyhDz5PYdAy5t7MB1GoL8nyTIKeFyw3CNO0B4VuoQLfYMVmdkTb0cSTUZKVOrko5ly/2UbcY4L/YAVN8aGnoTgzEixzb9wljKumFrBdQMqaI8tv7LTt0BlC+htbvxABaArm6lkQxPzQIF/acmEg0mh82jcHhPrv4SAYxFahNBrAW8pIiq7cN0xot3GfjS3aPIU0Qk92pa5RaaWhMA/8LG2LyWFAJq1X49q3BSCQZ/fAEKolDKeJWw7MwbGUzcPoH4JLU0a/p37HHePlbteTCt/F7Et+XFvTzKtlLd7JJP6jvSEJFp2A9jLhKmNc6Z87r4/QKlz5Hijix27Pra0swYbTSu80gH1VzzgNHc3W3TXb+y9wkSN7nGdh+JdVt5XFfMMvQHytT8p/WwFzA2u86MkBFQmiSlspRcze4nAbkqVmOENj39+jlRvNzW837pnr8/5pxIDkQyQhM/7JJKBLyUGhO1yOpQYKOcjBb3EoDPWSwz+pUD+nn4eHPeAdvuM0V44oCrjFI5g1YAjqTSSdZVJJnvufLiT2CIYtTJdEfjubet8RCpX6O7zNfiBagsYbnbvsA7DYMuuQDrPPLFCubMUQBRbp85d1qDmClochhSW2xO6p1YUOYp7wKq/LRMAklf75U5cQtB5qZ0mVPnz8QtJ/BTB+NTKuG/BPTfaZHlvG3TVlu9svUs/YgT68iodpZBzwgFEmby092gPsgy3rEHX8dw4jOhvyeQjR8lilGRx+W+uIy4HPjun2E5vjHWV0v1unfZt1QOjLbG0PU7n0VLxNxQ6ezrZTgNF3pZgwjthheXUkC8IuDJJmLTq6T26zpEhy1eMVLBq+0BZ3tsPgfr8EE0D4ArHx4OEsCW11KFn4Qj9HE30Iw+SRnI+Ilk7BdDblXLn4MRRQ3B3bxv28afF1iQxi9VCM2v5zKTr+zMhA4/NuDwKrbV/6Y8Zn7uw+ZyHEIxknvTZa+1IJvOjB84xUb19UGjm9R3tQoVf0Q5YIcf2OafrER9bc32hIqnTVHLZRfb26R6a5zlJtJQkZmY/lgAEmudmv+Lf9RrmmXBfQy2fQG7n8pkMrpO4OwTcVyE/1YuxdbqvkhsWyJoASv2Exk5acoEuC1RfABC7eq5iZZuAZ/MAn7BktbCvY2sp44HAboLleE1Ujweroh6jFssQ0GSuC0+X8ZRY2oWafP+uxAFgdcd3CLg8mT2gn71kyXCG9azeh1XGiyYQG04zd3tfs4Xc3En13IVaHiOp3Rqzsxn0T0h1YmQte+MOI4omk8RlpGjJ4A05dp8f3YdwvK6TxBDIjPuD5/VYeXoYT8nEYVD18xCMuE4d8MNuOlvobGX5mxKkLmugJHT/eT1et/WAXG2zXiMmCe0CUMORcdLox7oNNPv3OCyfgX1/SP5cQRZUeyzre0xtlUC0E8g+
qObyafRiLOR1E0tugZ19k3wPJ0Z6n1nOeLJMwVKcCQPz1LenDiM/cWRsdfb6YbIkYm/iF51kZHMrq7ZNdqsAUi85uWWcxT4g2hOFHXE0DCTLOb1eHDoznFApNHgcquSnSnv74S56qkig0DIvC8Uf8TnqWPey4LGnJnHdCwJmESjRAIDccxDM8U/M7Yj7Spxoks3XUx0Zp94eg2UeQGcTTqTo959S5ChmgX8GklOU6zuTrKBgJHR0lqAYA3ZYwepZERdjjjvK8xjebyT1c+GbFqMsAPsjSatnLySdLsa3MfJlzVYDeNbjPguUyUEVF4W/FThnvC/tcK3jTrUqdRmd+KdZwg74E7i0xr48GSTWOabWcojc7VXE5BgnPPmSRQLYel4iqX+LfS0t5XgNFaMDgojhB3mLgCOj4sNKm7VYns1PENZXuRijKukheU3UkGJADqJSZxbUYgnHCM9rXeA7UmhtGRcI8rjuSFDhGKLSeVPbhJaZI44zKtv9yF+9jbdvMPDfQWo6FUle+7U9TkiEE58JIsD6wW3uRCw/F1g70Qq00GOcbiOsejsE7Zpk5vlIjFM3waqTBdDbc1ub8STVdBla6Ds68U7ksjvOFIG95WDN70c6oivuafDmDFQJv8IuRPUZKX5ITxuS/y/7+piqT1mF8dUAeptw3P2NXJ4aPxjAvUySZSb0OPV2FFzjdTVHglUet53R1Ve6R5x63P80dzJb6FusuK8IXt8v55wfBVa5kmGMJn6FCb09jm7GpaCs6HbmoMA7XdcvsWCVA16fVdzTDOpykLpycJ1X3BlUWti4mSeW6ET/gyjalRiz9kcKnTwA522LsBD5wfX/52NdtZ0t9BLTWtomVPyi80yhFQh4eyJrF2xpKtfX3u+XFXT5+1eyhPuWrrwyAr3WnkeMWuiA1dY7lyfFBSVp1ib40JXAzuyJKyGLtQhAd0SUzjk6cu/Ie+LnIYyuvyksF2f5HmlfEVrshePOwa6AmoSaVD4HtqvD+hEzTfhEbDGRN+FEO5J2pdjy7wLFP/HfQOE0hYfSJzd8SBkvOoSgQ/t7pJb2gi12oQ+DRPWJJNoGQfeCVefy3f1zLIuET65801xgtILHIQyWnyH+6pz4WmSpcgXuz7iPFmcwNbHvq6/p3Zfa22/dndE/R6874y2c1yOA2zxOqUelrz3wPLOD8+p93RYssgDqpw6/UI7zWFPmbiLukG7XRF3Uq/KDtv3Wf7Wf4TmZyJG79lKv0/ZRfH+bjt615uH287zWZTQZ9/Yi7L72lfe25R0DJWtxSBXnZGnprOIooOoesEr5ue57dt06PzkSlXE/kz6v9Ru/8nZXSc0V+Rfka0cKPR4HP/+e1S3kfrtYshgJopqoehwoy2+v3yrc/umcqsGB4zdjrcCHPYxTdubwvD1Bx5qIrrug+kR8z0ZEQQ61cPIXyPcWzmvGbu6Ty56o3pEkZX52LT7ELAA8h32qCg+D3Fsbl/7zO2pe2GqVDXfaeWW+Fy3rXLHiz6s8MGtyyCuHvp4t5PLuDDnDisdsJvW68coutabb7TPcdNTtirrRww9y7re43IHCWTvPzVo58l+zvtXEzQ/3UDe8vOsHKhu7cr3lPuu9QEUUgN7ikSNQ6Am2fEaNUd0dmddzXZuhmjw6gk1xveaFrrZRcdd18VIUGfGgfERhCTd6kNZdj6MTVnjOshXFw4G1av+SPxnNs1deXczjvSDLTwLoH+hkuEvvlj+/2ccOq67U67C7kZPoPBzurMtrvZklXP5+gRPzAFX9SNL5R2tfiHK66b6s/dtsM877OtnGDC1Zlftt9tfHDa0coM37tP6G/GP3T6qLvTfvVnnBkaiNXmQBbDVtvIFx39znO3GwbQcUuqzJx8ff6ggdbD74U3MMxuSvnevNJrDvnbGNb7/TbdrTrfEQF3gTO2/qi5YfI8sXfkhwtyndIY4ZO9xfaR8vi230+3p8si0zR8ZfXccaqE/Fq/B1qMt0YfkxSbvF5LAz1gqEny0kf4N4+0rb+qe6RG+CNAdO3mXGAQ10pO8TXxWognEDLiswNxxQZU10ZEIOaBT/rt2KPtxK7fBkZUVU7mhjJi62+JFPTwOK3OumeY/Af78L521wO0Dkd0HuJ4rAbznmtx3yx8bXdcytzpFBo/trCPemk0PIhr4GAL0GC8KNUJCaPMEIDbI3on/cgtJklb2uyYAF/gS+RBK2qTSy+vij+qBCqWjx4Q886haNPbakj+nsmjJPPYahLtLrT7SBfPEjkvMRKXSPE5JjkV6fNGT5awpIf940kLUTgs6Gz/v7wvvt59JfLrfs4RM0eIagV4TArXp16v63EYfnomw0dC7jTY/QlTVJfH6TBTW1I2Z16uu9BEoc44Ry6yw1P21KVG+0n3yiL7m6e3LaTT/demJxTyXOfAhLSr+BigFAUkO1Nu0tzcc57/Z+DaWc9VjTB1zuswV9+lDW7ffz3tA7gaKdVr4WY+v8Fo0m1m6t2YIPN2fPsaKd2l4CwXiDoC4JSJ42FJWIRmFF7zW2Ygh96tB1PHL09iphuext7pYdaxqs0zf+3XNc7zDxk6oH7m+l6j4duQbKDaKFyLzLp4VW2+bFGb//jcA/u9z8FRry7Zaqd2nJgts4XEe/Lh/OIsr/vr57nS5uZVTOI9q87m3DayOCElKXsaI9d037dtFDv0+zVS4IQMagYhYkMUeDetz4iR4ML3Wl2fMXe6xb732FPuzTCn8r5fidvvC/hrbs0a1/cR9wJ26Kd/u6VX8T0MUf+VBsaMrOHVu2b3u+03aJpxtra7m82wv8dTqyhw/+BXp5d4c/8tVrMQT/VPdMgV+suh0QR6KaMoLOqN8F8YUOiK9Dv6908n6q88H5frdv9Vkd7svn3U5H7UKgz0i6nX6vo69+n5UduZ/t5Jt4ewz8I4Xze/vh8dTuTn4jWWxXBupP5Hqwre5Kbql/7V58rQVZGpN7p4JoJjy0v92J8Sc+3eh8KthnkblpV/c370Lc4W7hT1c1et8H/+3dwR9UJW67sbrdVTypjDXbeNRsg1yeH4J0MGnch2S7GqJtlpa5CRVf6iSNEx5sMlZ/aNEkjQXJvDJN/SBh5M/cPPtu/7+QSiGbZY/0+/1S3WffTBTTtz6wJd0zf5+6+ZMUSfejvDfpkVMA3Fc0wBP+H+uT/6g+N/3lv//f/wQAAP//vp+ZfQ==") SupportedMap = make(map[string]Spec) for f, v := range unpacked { diff --git a/internal/pkg/agent/program/testdata/single_config-packetbeat.yml b/internal/pkg/agent/program/testdata/single_config-packetbeat.yml index f800d0bd2a0..3d62d2c49c6 100644 --- a/internal/pkg/agent/program/testdata/single_config-packetbeat.yml +++ 
b/internal/pkg/agent/program/testdata/single_config-packetbeat.yml @@ -23,6 +23,13 @@ inputs: data_stream: dataset: packet.icmp type: logs + - id: packet-network_traffic.redis-387bdc6a-0acb-4ef2-9552-c21e524a2d21 + type: redis + data_stream: + dataset: network_traffic.redis + type: logs + ports: + - 6379 output: elasticsearch: hosts: diff --git a/internal/pkg/agent/program/testdata/single_config.yml b/internal/pkg/agent/program/testdata/single_config.yml index 16a03f9a77d..b2cd1f87466 100644 --- a/internal/pkg/agent/program/testdata/single_config.yml +++ b/internal/pkg/agent/program/testdata/single_config.yml @@ -104,6 +104,13 @@ inputs: data_stream: dataset: packet.icmp type: logs + - id: packet-network_traffic.redis-387bdc6a-0acb-4ef2-9552-c21e524a2d21 + type: redis + data_stream: + dataset: network_traffic.redis + type: logs + ports: + - 6379 - id: endpoint-id type: endpoint name: endpoint-1 diff --git a/internal/spec/filebeat.yml b/internal/spec/filebeat.yml index 10f8ee4493b..e4dbd2a9892 100644 --- a/internal/spec/filebeat.yml +++ b/internal/spec/filebeat.yml @@ -19,6 +19,44 @@ rules: on_conflict: insert_after type: logs +# Input filtering needs to happen before any other input transformations. +# See https://github.com/elastic/elastic-agent/issues/427. +- filter_values: + selector: inputs + key: type + values: + - aws-cloudwatch + - aws-s3 + - azure-eventhub + - cloudfoundry + - container + - docker + - event/file + - event/stdin + - event/tcp + - event/udp + - filestream + - gcp-pubsub + - http_endpoint + - httpjson + - journald + - kafka + - log + - log/docker + - log/redis_slowlog + - log/syslog + - logfile + - mqtt + - netflow + - o365audit + - redis + - stdin + - syslog + - tcp + - udp + - unix + - winlog + - map: path: inputs rules: @@ -63,34 +101,6 @@ rules: - remove_key: key: data_stream.dataset -- filter_values: - selector: inputs - key: type - values: - - aws-cloudwatch - - aws-s3 - - azure-eventhub - - cloudfoundry - - container - - docker - - gcp-pubsub - - http_endpoint - - httpjson - - journald - - kafka - - log - - mqtt - - netflow - - o365audit - - redis - - stdin - - syslog - - tcp - - udp - - unix - - winlog - - filestream - - filter_values: selector: inputs key: enabled From f4d2b48fc2c07f45f98631af541ee9802e6959c3 Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Fri, 15 Jul 2022 01:43:11 -0400 Subject: [PATCH 046/180] [Automation] Update elastic stack version to 8.4.0-3cd57abb for testing (#724) Co-authored-by: apmmachine --- testing/environments/snapshot.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/testing/environments/snapshot.yml b/testing/environments/snapshot.yml index 0d7fb81c508..f05494199a3 100644 --- a/testing/environments/snapshot.yml +++ b/testing/environments/snapshot.yml @@ -3,7 +3,7 @@ version: '2.3' services: elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:8.4.0-00048b66-SNAPSHOT + image: docker.elastic.co/elasticsearch/elasticsearch:8.4.0-3cd57abb-SNAPSHOT # When extend is used it merges healthcheck.tests, see: # https://github.com/docker/compose/issues/8962 # healthcheck: @@ -42,7 +42,7 @@ services: - ./docker/logstash/pki:/etc/pki:ro kibana: - image: docker.elastic.co/kibana/kibana:8.4.0-00048b66-SNAPSHOT + image: docker.elastic.co/kibana/kibana:8.4.0-3cd57abb-SNAPSHOT environment: - "ELASTICSEARCH_USERNAME=kibana_system_user" - "ELASTICSEARCH_PASSWORD=testing" From a5351f961d535b0bc39f9ea283fa7058c6fd6f5e Mon Sep 17 00:00:00 2001 
From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Mon, 18 Jul 2022 01:45:03 -0400 Subject: [PATCH 047/180] [Automation] Update elastic stack version to 8.4.0-a324b98b for testing (#727) Co-authored-by: apmmachine --- testing/environments/snapshot.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/testing/environments/snapshot.yml b/testing/environments/snapshot.yml index f05494199a3..f75cba71333 100644 --- a/testing/environments/snapshot.yml +++ b/testing/environments/snapshot.yml @@ -3,7 +3,7 @@ version: '2.3' services: elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:8.4.0-3cd57abb-SNAPSHOT + image: docker.elastic.co/elasticsearch/elasticsearch:8.4.0-a324b98b-SNAPSHOT # When extend is used it merges healthcheck.tests, see: # https://github.com/docker/compose/issues/8962 # healthcheck: @@ -42,7 +42,7 @@ services: - ./docker/logstash/pki:/etc/pki:ro kibana: - image: docker.elastic.co/kibana/kibana:8.4.0-3cd57abb-SNAPSHOT + image: docker.elastic.co/kibana/kibana:8.4.0-a324b98b-SNAPSHOT environment: - "ELASTICSEARCH_USERNAME=kibana_system_user" - "ELASTICSEARCH_PASSWORD=testing" From c6cdfc0a499f821d269ca35575151453408ba962 Mon Sep 17 00:00:00 2001 From: Victor Martinez Date: Mon, 18 Jul 2022 15:43:13 +0100 Subject: [PATCH 048/180] ci: run on MacOS12 (#696) --- .ci/Jenkinsfile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.ci/Jenkinsfile b/.ci/Jenkinsfile index 9454f8d2cc6..34a72100707 100644 --- a/.ci/Jenkinsfile +++ b/.ci/Jenkinsfile @@ -79,7 +79,7 @@ pipeline { axes { axis { name 'PLATFORM' - values 'ubuntu-20.04 && immutable', 'aws && aarch64', 'windows-2016 && windows-immutable', 'windows-2022 && windows-immutable', 'darwin && orka && x86_64' + values 'ubuntu-20.04 && immutable', 'aws && aarch64', 'windows-2016 && windows-immutable', 'windows-2022 && windows-immutable', 'macos12 && x86_64' } } stages { @@ -277,7 +277,7 @@ def isCodeCoverageEnabled() { def withPackageEnv(platform, Closure body) { if (isUnix()) { - if (platform.contains('macosx')) { + if (isDarwin()) { withPackageDarwinEnv() { body() } From 21c48120f7e9e722a83153b2d07169c930117e8b Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Tue, 19 Jul 2022 01:43:20 -0400 Subject: [PATCH 049/180] [Automation] Update elastic stack version to 8.4.0-31315ca3 for testing (#732) Co-authored-by: apmmachine --- testing/environments/snapshot.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/testing/environments/snapshot.yml b/testing/environments/snapshot.yml index f75cba71333..43b309fb98e 100644 --- a/testing/environments/snapshot.yml +++ b/testing/environments/snapshot.yml @@ -3,7 +3,7 @@ version: '2.3' services: elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:8.4.0-a324b98b-SNAPSHOT + image: docker.elastic.co/elasticsearch/elasticsearch:8.4.0-31315ca3-SNAPSHOT # When extend is used it merges healthcheck.tests, see: # https://github.com/docker/compose/issues/8962 # healthcheck: @@ -42,7 +42,7 @@ services: - ./docker/logstash/pki:/etc/pki:ro kibana: - image: docker.elastic.co/kibana/kibana:8.4.0-a324b98b-SNAPSHOT + image: docker.elastic.co/kibana/kibana:8.4.0-31315ca3-SNAPSHOT environment: - "ELASTICSEARCH_USERNAME=kibana_system_user" - "ELASTICSEARCH_PASSWORD=testing" From 78019a9c9615592c2e957a97af0079129132fcfd Mon Sep 17 00:00:00 2001 From: Tiago Queiroz Date: Tue, 19 Jul 2022 10:56:10 +0200 Subject: [PATCH 050/180] fix typo on package command 
(#734) This commit fixes the typo in the package command on the README.md. --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 25aff95042e..df27ac6f99a 100644 --- a/README.md +++ b/README.md @@ -17,7 +17,7 @@ In Linux operating systems that you can not run docker as a root user you need t Running Elastic Agent in a docker container is a common use case. To build the Elastic Agent and create a docker image run the following command: ``` -DEV=true SNAPSHOT=true PLATFORMS=linux/amd64 TYPES=docker mage package +DEV=true SNAPSHOT=true PLATFORMS=linux/amd64 PACKAGES=docker mage package ``` If you are in the 7.13 branch, this will create the `docker.elastic.co/beats/elastic-agent:7.13.0-SNAPSHOT` image in your local environment. Now you can use this to for example test this container with the stack in elastic-package: @@ -45,7 +45,7 @@ for the standard variant. 1. Build elastic-agent: ```bash -DEV=true PLATFORMS=linux/amd64 TYPES=docker mage package +DEV=true PLATFORMS=linux/amd64 PACKAGES=docker mage package ``` Use environmental variables `GOHOSTOS` and `GOHOSTARCH` to specify PLATFORMS variable accordingly. eg. From e34e0e71d0c106381f9120b5d05032f6358a64cc Mon Sep 17 00:00:00 2001 From: Michel Laterman <82832767+michel-laterman@users.noreply.github.com> Date: Tue, 19 Jul 2022 11:53:00 -0700 Subject: [PATCH 051/180] Allow / to be used in variable names (#718) * Allow the / character to be used in variable names. Allow / to be used in variable names from dynamic providers and eql expressions. Ensure that k8s providers can provide variables with slashes in their names. * run antlr4 * Fix tests --- CHANGELOG.next.asciidoc | 1 + internal/pkg/agent/transpiler/vars.go | 2 +- internal/pkg/agent/transpiler/vars_test.go | 7 + .../providers/kubernetes/node_test.go | 20 ++- .../providers/kubernetes/pod_test.go | 52 +++--- .../providers/kubernetes/service_test.go | 20 ++- internal/pkg/eql/Eql.g4 | 2 +- internal/pkg/eql/eql_test.go | 10 +- internal/pkg/eql/parser/EqlLexer.interp | 2 +- internal/pkg/eql/parser/eql_lexer.go | 154 +++++++++--------- 10 files changed, 150 insertions(+), 120 deletions(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 33aa1756f0d..6ee9674253e 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -109,6 +109,7 @@ - diagnostics collect file mod times are set. {pull}570[570] - Allow ':' characters in dynamic variables {issue}624[624] {pull}680[680] - Allow the - char to appear as part of variable names in eql expressions. {issue}709[709] {pull}710[710] +- Allow the / char in variable names in eql and transpiler. {issue}715[715] {pull}718[718] ==== New features diff --git a/internal/pkg/agent/transpiler/vars.go b/internal/pkg/agent/transpiler/vars.go index a9f96b15ee8..e8f06a6928b 100644 --- a/internal/pkg/agent/transpiler/vars.go +++ b/internal/pkg/agent/transpiler/vars.go @@ -14,7 +14,7 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/core/composable" ) -var varsRegex = regexp.MustCompile(`\${([\p{L}\d\s\\\-_|.'":]*)}`) +var varsRegex = regexp.MustCompile(`\${([\p{L}\d\s\\\-_|.'":\/]*)}`) // ErrNoMatch is return when the replace didn't fail, just that no vars match to perform the replace. 
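// Illustrative sketch, not part of the applied diff: the same kind of quick
// standalone check for the '/' addition above, assuming only the Go standard
// library. The input mirrors the `${un-der_score.with/slash}` case added to
// vars_test.go below, which in turn matches the "with/slash": "some/path"
// label values used in the kubernetes provider tests of this commit; without
// '/' in the character class this expression did not match at all.
//
//	package main
//
//	import (
//		"fmt"
//		"regexp"
//	)
//
//	func main() {
//		re := regexp.MustCompile(`\${([\p{L}\d\s\\\-_|.'":\/]*)}`)
//		m := re.FindStringSubmatch(`${un-der_score.with/slash}`)
//		fmt.Println(m[1]) // un-der_score.with/slash
//	}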
var ErrNoMatch = fmt.Errorf("no matching vars") diff --git a/internal/pkg/agent/transpiler/vars_test.go b/internal/pkg/agent/transpiler/vars_test.go index 142ab132109..56e27694a33 100644 --- a/internal/pkg/agent/transpiler/vars_test.go +++ b/internal/pkg/agent/transpiler/vars_test.go @@ -24,6 +24,7 @@ func TestVars_Replace(t *testing.T) { "array1", "array2", }, + "with/slash": "some/path", "dict": map[string]interface{}{ "key1": "value1", "key2": "value2", @@ -168,6 +169,12 @@ func TestVars_Replace(t *testing.T) { false, false, }, + { + `${un-der_score.with/slash}`, + NewStrVal(`some/path`), + false, + false, + }, { `list inside string ${un-der_score.list} causes no match`, NewList([]Node{ diff --git a/internal/pkg/composable/providers/kubernetes/node_test.go b/internal/pkg/composable/providers/kubernetes/node_test.go index 547702573c4..ab19e7d2ce2 100644 --- a/internal/pkg/composable/providers/kubernetes/node_test.go +++ b/internal/pkg/composable/providers/kubernetes/node_test.go @@ -26,8 +26,9 @@ func TestGenerateNodeData(t *testing.T) { Name: "testnode", UID: types.UID(uid), Labels: map[string]string{ - "foo": "bar", - "with-dash": "dash-value", + "foo": "bar", + "with-dash": "dash-value", + "with/slash": "some/path", }, Annotations: map[string]string{ "baz": "ban", @@ -55,8 +56,9 @@ func TestGenerateNodeData(t *testing.T) { "baz": "ban", }, "labels": mapstr.M{ - "foo": "bar", - "with-dash": "dash-value", + "foo": "bar", + "with-dash": "dash-value", + "with/slash": "some/path", }, } @@ -67,8 +69,9 @@ func TestGenerateNodeData(t *testing.T) { "url": "8.8.8.8:9090"}, }, "kubernetes": mapstr.M{ "labels": mapstr.M{ - "foo": "bar", - "with-dash": "dash-value", + "foo": "bar", + "with-dash": "dash-value", + "with/slash": "some/path", }, "annotations": mapstr.M{"baz": "ban"}, "node": mapstr.M{ @@ -128,8 +131,9 @@ func (n *nodeMeta) GenerateK8s(obj kubernetes.Resource, opts ...metadata.FieldOp "ip": "node1", }, "labels": mapstr.M{ - "foo": "bar", - "with-dash": "dash-value", + "foo": "bar", + "with-dash": "dash-value", + "with/slash": "some/path", }, "annotations": mapstr.M{ "baz": "ban", diff --git a/internal/pkg/composable/providers/kubernetes/pod_test.go b/internal/pkg/composable/providers/kubernetes/pod_test.go index feeba193472..95361fd2ce0 100644 --- a/internal/pkg/composable/providers/kubernetes/pod_test.go +++ b/internal/pkg/composable/providers/kubernetes/pod_test.go @@ -27,8 +27,9 @@ func TestGeneratePodData(t *testing.T) { UID: types.UID(uid), Namespace: "testns", Labels: map[string]string{ - "foo": "bar", - "with-dash": "dash-value", + "foo": "bar", + "with-dash": "dash-value", + "with/slash": "some/path", }, Annotations: map[string]string{ "app": "production", @@ -60,8 +61,9 @@ func TestGeneratePodData(t *testing.T) { "nsa": "nsb", }, "labels": mapstr.M{ - "foo": "bar", - "with-dash": "dash-value", + "foo": "bar", + "with-dash": "dash-value", + "with/slash": "some/path", }, "annotations": mapstr.M{ "app": "production", @@ -76,8 +78,9 @@ func TestGeneratePodData(t *testing.T) { }, "kubernetes": mapstr.M{ "namespace": "testns", "labels": mapstr.M{ - "foo": "bar", - "with-dash": "dash-value", + "foo": "bar", + "with-dash": "dash-value", + "with/slash": "some/path", }, "annotations": mapstr.M{"app": "production"}, "pod": mapstr.M{ @@ -122,8 +125,9 @@ func TestGenerateContainerPodData(t *testing.T) { UID: types.UID(uid), Namespace: "testns", Labels: map[string]string{ - "foo": "bar", - "with-dash": "dash-value", + "foo": "bar", + "with-dash": "dash-value", + "with/slash": "some/path", 
}, Annotations: map[string]string{ "app": "production", @@ -179,8 +183,9 @@ func TestGenerateContainerPodData(t *testing.T) { "app": "production", }, "labels": mapstr.M{ - "foo": "bar", - "with-dash": "dash-value", + "foo": "bar", + "with-dash": "dash-value", + "with/slash": "some/path", }, } @@ -196,8 +201,10 @@ func TestGenerateContainerPodData(t *testing.T) { }, "kubernetes": mapstr.M{ "namespace": "testns", "annotations": mapstr.M{"app": "production"}, - "labels": mapstr.M{"foo": "bar", - "with-dash": "dash-value", + "labels": mapstr.M{ + "foo": "bar", + "with-dash": "dash-value", + "with/slash": "some/path", }, "pod": mapstr.M{ "ip": "127.0.0.5", @@ -239,8 +246,9 @@ func TestEphemeralContainers(t *testing.T) { UID: types.UID(uid), Namespace: "testns", Labels: map[string]string{ - "foo": "bar", - "with-dash": "dash-value", + "foo": "bar", + "with-dash": "dash-value", + "with/slash": "some/path", }, Annotations: map[string]string{ "app": "production", @@ -282,8 +290,9 @@ func TestEphemeralContainers(t *testing.T) { "ip": pod.Status.PodIP, }, "labels": mapstr.M{ - "foo": "bar", - "with-dash": "dash-value", + "foo": "bar", + "with-dash": "dash-value", + "with/slash": "some/path", }, "container": mapstr.M{ "id": "asdfghdeadbeef", @@ -310,8 +319,10 @@ func TestEphemeralContainers(t *testing.T) { "url": "8.8.8.8:9090"}, }, "kubernetes": mapstr.M{ "namespace": "testns", - "labels": mapstr.M{"foo": "bar", - "with-dash": "dash-value", + "labels": mapstr.M{ + "foo": "bar", + "with-dash": "dash-value", + "with/slash": "some/path", }, "annotations": mapstr.M{"app": "production"}, "pod": mapstr.M{ @@ -394,8 +405,9 @@ func (p *podMeta) GenerateK8s(obj kubernetes.Resource, opts ...metadata.FieldOpt "ip": k8sPod.Status.PodIP, }, "labels": mapstr.M{ - "foo": "bar", - "with-dash": "dash-value", + "foo": "bar", + "with-dash": "dash-value", + "with/slash": "some/path", }, "annotations": mapstr.M{ "app": "production", diff --git a/internal/pkg/composable/providers/kubernetes/service_test.go b/internal/pkg/composable/providers/kubernetes/service_test.go index 0fbed196908..69e945ee1cd 100644 --- a/internal/pkg/composable/providers/kubernetes/service_test.go +++ b/internal/pkg/composable/providers/kubernetes/service_test.go @@ -25,8 +25,9 @@ func TestGenerateServiceData(t *testing.T) { UID: types.UID(uid), Namespace: "testns", Labels: map[string]string{ - "foo": "bar", - "with-dash": "dash-value", + "foo": "bar", + "with-dash": "dash-value", + "with/slash": "some/path", }, Annotations: map[string]string{ "baz": "ban", @@ -65,8 +66,9 @@ func TestGenerateServiceData(t *testing.T) { "baz": "ban", }, "labels": mapstr.M{ - "foo": "bar", - "with-dash": "dash-value", + "foo": "bar", + "with-dash": "dash-value", + "with/slash": "some/path", }, } @@ -82,8 +84,9 @@ func TestGenerateServiceData(t *testing.T) { "ip": "1.2.3.4", }, "labels": mapstr.M{ - "foo": "bar", - "with-dash": "dash-value", + "foo": "bar", + "with-dash": "dash-value", + "with/slash": "some/path", }, "annotations": mapstr.M{ "baz": "ban", @@ -142,8 +145,9 @@ func (s *svcMeta) GenerateK8s(obj kubernetes.Resource, opts ...metadata.FieldOpt "ip": "1.2.3.4", }, "labels": mapstr.M{ - "foo": "bar", - "with-dash": "dash-value", + "foo": "bar", + "with-dash": "dash-value", + "with/slash": "some/path", }, "annotations": mapstr.M{ "baz": "ban", diff --git a/internal/pkg/eql/Eql.g4 b/internal/pkg/eql/Eql.g4 index b6731d41cef..bb7b5a88770 100644 --- a/internal/pkg/eql/Eql.g4 +++ b/internal/pkg/eql/Eql.g4 @@ -22,7 +22,7 @@ NUMBER: [\-]? 
[0-9]+; WHITESPACE: [ \r\n\t]+ -> skip; NOT: 'NOT' | 'not'; NAME: [a-zA-Z_] [a-zA-Z0-9_]*; -VNAME: [a-zA-Z0-9_.-]+('.'[a-zA-Z0-9_-]+)*; +VNAME: [a-zA-Z0-9_.\-/]+('.'[a-zA-Z0-9_\-/]+)*; STEXT: '\'' ~[\r\n']* '\''; DTEXT: '"' ~[\r\n"]* '"'; LPAR: '('; diff --git a/internal/pkg/eql/eql_test.go b/internal/pkg/eql/eql_test.go index 77bf5cb37f0..54f7741f88d 100644 --- a/internal/pkg/eql/eql_test.go +++ b/internal/pkg/eql/eql_test.go @@ -44,6 +44,7 @@ func TestEql(t *testing.T) { {expression: "${'constant'} == 'constant'", result: true}, {expression: "${data.with-dash} == 'dash-value'", result: true}, {expression: "${'dash-value'} == 'dash-value'", result: true}, + {expression: "${data.with/slash} == 'some/path'", result: true}, // boolean {expression: "true", result: true}, @@ -308,10 +309,11 @@ func TestEql(t *testing.T) { store := &testVarStore{ vars: map[string]interface{}{ - "env.HOSTNAME": "my-hostname", - "host.name": "host-name", - "data.array": []interface{}{"array1", "array2", "array3"}, - "data.with-dash": "dash-value", + "env.HOSTNAME": "my-hostname", + "host.name": "host-name", + "data.array": []interface{}{"array1", "array2", "array3"}, + "data.with-dash": "dash-value", + "data.with/slash": "some/path", "data.dict": map[string]interface{}{ "key1": "dict1", "key2": "dict2", diff --git a/internal/pkg/eql/parser/EqlLexer.interp b/internal/pkg/eql/parser/EqlLexer.interp index 3432105b62f..66413a00c42 100644 --- a/internal/pkg/eql/parser/EqlLexer.interp +++ b/internal/pkg/eql/parser/EqlLexer.interp @@ -113,4 +113,4 @@ mode names: DEFAULT_MODE atn: -[3, 24715, 42794, 33075, 47597, 16764, 15335, 30598, 22884, 2, 35, 230, 8, 1, 4, 2, 9, 2, 4, 3, 9, 3, 4, 4, 9, 4, 4, 5, 9, 5, 4, 6, 9, 6, 4, 7, 9, 7, 4, 8, 9, 8, 4, 9, 9, 9, 4, 10, 9, 10, 4, 11, 9, 11, 4, 12, 9, 12, 4, 13, 9, 13, 4, 14, 9, 14, 4, 15, 9, 15, 4, 16, 9, 16, 4, 17, 9, 17, 4, 18, 9, 18, 4, 19, 9, 19, 4, 20, 9, 20, 4, 21, 9, 21, 4, 22, 9, 22, 4, 23, 9, 23, 4, 24, 9, 24, 4, 25, 9, 25, 4, 26, 9, 26, 4, 27, 9, 27, 4, 28, 9, 28, 4, 29, 9, 29, 4, 30, 9, 30, 4, 31, 9, 31, 4, 32, 9, 32, 4, 33, 9, 33, 4, 34, 9, 34, 3, 2, 3, 2, 3, 3, 3, 3, 3, 4, 3, 4, 3, 5, 3, 5, 3, 5, 3, 6, 3, 6, 3, 6, 3, 7, 3, 7, 3, 8, 3, 8, 3, 9, 3, 9, 3, 9, 3, 10, 3, 10, 3, 10, 3, 11, 3, 11, 3, 12, 3, 12, 3, 13, 3, 13, 3, 14, 3, 14, 3, 15, 3, 15, 3, 16, 3, 16, 3, 16, 3, 16, 3, 16, 3, 16, 5, 16, 108, 10, 16, 3, 17, 3, 17, 3, 17, 3, 17, 5, 17, 114, 10, 17, 3, 18, 3, 18, 3, 18, 3, 18, 3, 18, 3, 18, 3, 18, 3, 18, 5, 18, 124, 10, 18, 3, 19, 3, 19, 3, 19, 3, 19, 3, 19, 3, 19, 3, 19, 3, 19, 3, 19, 3, 19, 5, 19, 136, 10, 19, 3, 20, 5, 20, 139, 10, 20, 3, 20, 6, 20, 142, 10, 20, 13, 20, 14, 20, 143, 3, 20, 3, 20, 6, 20, 148, 10, 20, 13, 20, 14, 20, 149, 3, 21, 5, 21, 153, 10, 21, 3, 21, 6, 21, 156, 10, 21, 13, 21, 14, 21, 157, 3, 22, 6, 22, 161, 10, 22, 13, 22, 14, 22, 162, 3, 22, 3, 22, 3, 23, 3, 23, 3, 23, 3, 23, 3, 23, 3, 23, 5, 23, 173, 10, 23, 3, 24, 3, 24, 7, 24, 177, 10, 24, 12, 24, 14, 24, 180, 11, 24, 3, 25, 6, 25, 183, 10, 25, 13, 25, 14, 25, 184, 3, 25, 3, 25, 6, 25, 189, 10, 25, 13, 25, 14, 25, 190, 7, 25, 193, 10, 25, 12, 25, 14, 25, 196, 11, 25, 3, 26, 3, 26, 7, 26, 200, 10, 26, 12, 26, 14, 26, 203, 11, 26, 3, 26, 3, 26, 3, 27, 3, 27, 7, 27, 209, 10, 27, 12, 27, 14, 27, 212, 11, 27, 3, 27, 3, 27, 3, 28, 3, 28, 3, 29, 3, 29, 3, 30, 3, 30, 3, 31, 3, 31, 3, 32, 3, 32, 3, 33, 3, 33, 3, 34, 3, 34, 3, 34, 2, 2, 35, 3, 3, 5, 4, 7, 5, 9, 6, 11, 7, 13, 8, 15, 9, 17, 10, 19, 11, 21, 12, 23, 13, 25, 14, 27, 15, 29, 16, 31, 17, 33, 18, 35, 19, 37, 20, 39, 21, 41, 22, 43, 23, 
45, 24, 47, 25, 49, 26, 51, 27, 53, 28, 55, 29, 57, 30, 59, 31, 61, 32, 63, 33, 65, 34, 67, 35, 3, 2, 11, 3, 2, 47, 47, 3, 2, 50, 59, 5, 2, 11, 12, 15, 15, 34, 34, 5, 2, 67, 92, 97, 97, 99, 124, 6, 2, 50, 59, 67, 92, 97, 97, 99, 124, 7, 2, 47, 48, 50, 59, 67, 92, 97, 97, 99, 124, 7, 2, 47, 47, 50, 59, 67, 92, 97, 97, 99, 124, 5, 2, 12, 12, 15, 15, 41, 41, 5, 2, 12, 12, 15, 15, 36, 36, 2, 246, 2, 3, 3, 2, 2, 2, 2, 5, 3, 2, 2, 2, 2, 7, 3, 2, 2, 2, 2, 9, 3, 2, 2, 2, 2, 11, 3, 2, 2, 2, 2, 13, 3, 2, 2, 2, 2, 15, 3, 2, 2, 2, 2, 17, 3, 2, 2, 2, 2, 19, 3, 2, 2, 2, 2, 21, 3, 2, 2, 2, 2, 23, 3, 2, 2, 2, 2, 25, 3, 2, 2, 2, 2, 27, 3, 2, 2, 2, 2, 29, 3, 2, 2, 2, 2, 31, 3, 2, 2, 2, 2, 33, 3, 2, 2, 2, 2, 35, 3, 2, 2, 2, 2, 37, 3, 2, 2, 2, 2, 39, 3, 2, 2, 2, 2, 41, 3, 2, 2, 2, 2, 43, 3, 2, 2, 2, 2, 45, 3, 2, 2, 2, 2, 47, 3, 2, 2, 2, 2, 49, 3, 2, 2, 2, 2, 51, 3, 2, 2, 2, 2, 53, 3, 2, 2, 2, 2, 55, 3, 2, 2, 2, 2, 57, 3, 2, 2, 2, 2, 59, 3, 2, 2, 2, 2, 61, 3, 2, 2, 2, 2, 63, 3, 2, 2, 2, 2, 65, 3, 2, 2, 2, 2, 67, 3, 2, 2, 2, 3, 69, 3, 2, 2, 2, 5, 71, 3, 2, 2, 2, 7, 73, 3, 2, 2, 2, 9, 75, 3, 2, 2, 2, 11, 78, 3, 2, 2, 2, 13, 81, 3, 2, 2, 2, 15, 83, 3, 2, 2, 2, 17, 85, 3, 2, 2, 2, 19, 88, 3, 2, 2, 2, 21, 91, 3, 2, 2, 2, 23, 93, 3, 2, 2, 2, 25, 95, 3, 2, 2, 2, 27, 97, 3, 2, 2, 2, 29, 99, 3, 2, 2, 2, 31, 107, 3, 2, 2, 2, 33, 113, 3, 2, 2, 2, 35, 123, 3, 2, 2, 2, 37, 135, 3, 2, 2, 2, 39, 138, 3, 2, 2, 2, 41, 152, 3, 2, 2, 2, 43, 160, 3, 2, 2, 2, 45, 172, 3, 2, 2, 2, 47, 174, 3, 2, 2, 2, 49, 182, 3, 2, 2, 2, 51, 197, 3, 2, 2, 2, 53, 206, 3, 2, 2, 2, 55, 215, 3, 2, 2, 2, 57, 217, 3, 2, 2, 2, 59, 219, 3, 2, 2, 2, 61, 221, 3, 2, 2, 2, 63, 223, 3, 2, 2, 2, 65, 225, 3, 2, 2, 2, 67, 227, 3, 2, 2, 2, 69, 70, 7, 126, 2, 2, 70, 4, 3, 2, 2, 2, 71, 72, 7, 46, 2, 2, 72, 6, 3, 2, 2, 2, 73, 74, 7, 60, 2, 2, 74, 8, 3, 2, 2, 2, 75, 76, 7, 63, 2, 2, 76, 77, 7, 63, 2, 2, 77, 10, 3, 2, 2, 2, 78, 79, 7, 35, 2, 2, 79, 80, 7, 63, 2, 2, 80, 12, 3, 2, 2, 2, 81, 82, 7, 64, 2, 2, 82, 14, 3, 2, 2, 2, 83, 84, 7, 62, 2, 2, 84, 16, 3, 2, 2, 2, 85, 86, 7, 64, 2, 2, 86, 87, 7, 63, 2, 2, 87, 18, 3, 2, 2, 2, 88, 89, 7, 62, 2, 2, 89, 90, 7, 63, 2, 2, 90, 20, 3, 2, 2, 2, 91, 92, 7, 45, 2, 2, 92, 22, 3, 2, 2, 2, 93, 94, 7, 47, 2, 2, 94, 24, 3, 2, 2, 2, 95, 96, 7, 44, 2, 2, 96, 26, 3, 2, 2, 2, 97, 98, 7, 49, 2, 2, 98, 28, 3, 2, 2, 2, 99, 100, 7, 39, 2, 2, 100, 30, 3, 2, 2, 2, 101, 102, 7, 99, 2, 2, 102, 103, 7, 112, 2, 2, 103, 108, 7, 102, 2, 2, 104, 105, 7, 67, 2, 2, 105, 106, 7, 80, 2, 2, 106, 108, 7, 70, 2, 2, 107, 101, 3, 2, 2, 2, 107, 104, 3, 2, 2, 2, 108, 32, 3, 2, 2, 2, 109, 110, 7, 113, 2, 2, 110, 114, 7, 116, 2, 2, 111, 112, 7, 81, 2, 2, 112, 114, 7, 84, 2, 2, 113, 109, 3, 2, 2, 2, 113, 111, 3, 2, 2, 2, 114, 34, 3, 2, 2, 2, 115, 116, 7, 118, 2, 2, 116, 117, 7, 116, 2, 2, 117, 118, 7, 119, 2, 2, 118, 124, 7, 103, 2, 2, 119, 120, 7, 86, 2, 2, 120, 121, 7, 84, 2, 2, 121, 122, 7, 87, 2, 2, 122, 124, 7, 71, 2, 2, 123, 115, 3, 2, 2, 2, 123, 119, 3, 2, 2, 2, 124, 36, 3, 2, 2, 2, 125, 126, 7, 104, 2, 2, 126, 127, 7, 99, 2, 2, 127, 128, 7, 110, 2, 2, 128, 129, 7, 117, 2, 2, 129, 136, 7, 103, 2, 2, 130, 131, 7, 72, 2, 2, 131, 132, 7, 67, 2, 2, 132, 133, 7, 78, 2, 2, 133, 134, 7, 85, 2, 2, 134, 136, 7, 71, 2, 2, 135, 125, 3, 2, 2, 2, 135, 130, 3, 2, 2, 2, 136, 38, 3, 2, 2, 2, 137, 139, 9, 2, 2, 2, 138, 137, 3, 2, 2, 2, 138, 139, 3, 2, 2, 2, 139, 141, 3, 2, 2, 2, 140, 142, 9, 3, 2, 2, 141, 140, 3, 2, 2, 2, 142, 143, 3, 2, 2, 2, 143, 141, 3, 2, 2, 2, 143, 144, 3, 2, 2, 2, 144, 145, 3, 2, 2, 2, 145, 147, 7, 48, 2, 2, 146, 148, 9, 3, 2, 2, 147, 146, 3, 2, 2, 2, 
148, 149, 3, 2, 2, 2, 149, 147, 3, 2, 2, 2, 149, 150, 3, 2, 2, 2, 150, 40, 3, 2, 2, 2, 151, 153, 9, 2, 2, 2, 152, 151, 3, 2, 2, 2, 152, 153, 3, 2, 2, 2, 153, 155, 3, 2, 2, 2, 154, 156, 9, 3, 2, 2, 155, 154, 3, 2, 2, 2, 156, 157, 3, 2, 2, 2, 157, 155, 3, 2, 2, 2, 157, 158, 3, 2, 2, 2, 158, 42, 3, 2, 2, 2, 159, 161, 9, 4, 2, 2, 160, 159, 3, 2, 2, 2, 161, 162, 3, 2, 2, 2, 162, 160, 3, 2, 2, 2, 162, 163, 3, 2, 2, 2, 163, 164, 3, 2, 2, 2, 164, 165, 8, 22, 2, 2, 165, 44, 3, 2, 2, 2, 166, 167, 7, 80, 2, 2, 167, 168, 7, 81, 2, 2, 168, 173, 7, 86, 2, 2, 169, 170, 7, 112, 2, 2, 170, 171, 7, 113, 2, 2, 171, 173, 7, 118, 2, 2, 172, 166, 3, 2, 2, 2, 172, 169, 3, 2, 2, 2, 173, 46, 3, 2, 2, 2, 174, 178, 9, 5, 2, 2, 175, 177, 9, 6, 2, 2, 176, 175, 3, 2, 2, 2, 177, 180, 3, 2, 2, 2, 178, 176, 3, 2, 2, 2, 178, 179, 3, 2, 2, 2, 179, 48, 3, 2, 2, 2, 180, 178, 3, 2, 2, 2, 181, 183, 9, 7, 2, 2, 182, 181, 3, 2, 2, 2, 183, 184, 3, 2, 2, 2, 184, 182, 3, 2, 2, 2, 184, 185, 3, 2, 2, 2, 185, 194, 3, 2, 2, 2, 186, 188, 7, 48, 2, 2, 187, 189, 9, 8, 2, 2, 188, 187, 3, 2, 2, 2, 189, 190, 3, 2, 2, 2, 190, 188, 3, 2, 2, 2, 190, 191, 3, 2, 2, 2, 191, 193, 3, 2, 2, 2, 192, 186, 3, 2, 2, 2, 193, 196, 3, 2, 2, 2, 194, 192, 3, 2, 2, 2, 194, 195, 3, 2, 2, 2, 195, 50, 3, 2, 2, 2, 196, 194, 3, 2, 2, 2, 197, 201, 7, 41, 2, 2, 198, 200, 10, 9, 2, 2, 199, 198, 3, 2, 2, 2, 200, 203, 3, 2, 2, 2, 201, 199, 3, 2, 2, 2, 201, 202, 3, 2, 2, 2, 202, 204, 3, 2, 2, 2, 203, 201, 3, 2, 2, 2, 204, 205, 7, 41, 2, 2, 205, 52, 3, 2, 2, 2, 206, 210, 7, 36, 2, 2, 207, 209, 10, 10, 2, 2, 208, 207, 3, 2, 2, 2, 209, 212, 3, 2, 2, 2, 210, 208, 3, 2, 2, 2, 210, 211, 3, 2, 2, 2, 211, 213, 3, 2, 2, 2, 212, 210, 3, 2, 2, 2, 213, 214, 7, 36, 2, 2, 214, 54, 3, 2, 2, 2, 215, 216, 7, 42, 2, 2, 216, 56, 3, 2, 2, 2, 217, 218, 7, 43, 2, 2, 218, 58, 3, 2, 2, 2, 219, 220, 7, 93, 2, 2, 220, 60, 3, 2, 2, 2, 221, 222, 7, 95, 2, 2, 222, 62, 3, 2, 2, 2, 223, 224, 7, 125, 2, 2, 224, 64, 3, 2, 2, 2, 225, 226, 7, 127, 2, 2, 226, 66, 3, 2, 2, 2, 227, 228, 7, 38, 2, 2, 228, 229, 7, 125, 2, 2, 229, 68, 3, 2, 2, 2, 20, 2, 107, 113, 123, 135, 138, 143, 149, 152, 157, 162, 172, 178, 184, 190, 194, 201, 210, 3, 8, 2, 2] \ No newline at end of file +[3, 24715, 42794, 33075, 47597, 16764, 15335, 30598, 22884, 2, 35, 230, 8, 1, 4, 2, 9, 2, 4, 3, 9, 3, 4, 4, 9, 4, 4, 5, 9, 5, 4, 6, 9, 6, 4, 7, 9, 7, 4, 8, 9, 8, 4, 9, 9, 9, 4, 10, 9, 10, 4, 11, 9, 11, 4, 12, 9, 12, 4, 13, 9, 13, 4, 14, 9, 14, 4, 15, 9, 15, 4, 16, 9, 16, 4, 17, 9, 17, 4, 18, 9, 18, 4, 19, 9, 19, 4, 20, 9, 20, 4, 21, 9, 21, 4, 22, 9, 22, 4, 23, 9, 23, 4, 24, 9, 24, 4, 25, 9, 25, 4, 26, 9, 26, 4, 27, 9, 27, 4, 28, 9, 28, 4, 29, 9, 29, 4, 30, 9, 30, 4, 31, 9, 31, 4, 32, 9, 32, 4, 33, 9, 33, 4, 34, 9, 34, 3, 2, 3, 2, 3, 3, 3, 3, 3, 4, 3, 4, 3, 5, 3, 5, 3, 5, 3, 6, 3, 6, 3, 6, 3, 7, 3, 7, 3, 8, 3, 8, 3, 9, 3, 9, 3, 9, 3, 10, 3, 10, 3, 10, 3, 11, 3, 11, 3, 12, 3, 12, 3, 13, 3, 13, 3, 14, 3, 14, 3, 15, 3, 15, 3, 16, 3, 16, 3, 16, 3, 16, 3, 16, 3, 16, 5, 16, 108, 10, 16, 3, 17, 3, 17, 3, 17, 3, 17, 5, 17, 114, 10, 17, 3, 18, 3, 18, 3, 18, 3, 18, 3, 18, 3, 18, 3, 18, 3, 18, 5, 18, 124, 10, 18, 3, 19, 3, 19, 3, 19, 3, 19, 3, 19, 3, 19, 3, 19, 3, 19, 3, 19, 3, 19, 5, 19, 136, 10, 19, 3, 20, 5, 20, 139, 10, 20, 3, 20, 6, 20, 142, 10, 20, 13, 20, 14, 20, 143, 3, 20, 3, 20, 6, 20, 148, 10, 20, 13, 20, 14, 20, 149, 3, 21, 5, 21, 153, 10, 21, 3, 21, 6, 21, 156, 10, 21, 13, 21, 14, 21, 157, 3, 22, 6, 22, 161, 10, 22, 13, 22, 14, 22, 162, 3, 22, 3, 22, 3, 23, 3, 23, 3, 23, 3, 23, 3, 23, 3, 23, 5, 23, 173, 10, 23, 3, 24, 3, 24, 7, 24, 177, 
10, 24, 12, 24, 14, 24, 180, 11, 24, 3, 25, 6, 25, 183, 10, 25, 13, 25, 14, 25, 184, 3, 25, 3, 25, 6, 25, 189, 10, 25, 13, 25, 14, 25, 190, 7, 25, 193, 10, 25, 12, 25, 14, 25, 196, 11, 25, 3, 26, 3, 26, 7, 26, 200, 10, 26, 12, 26, 14, 26, 203, 11, 26, 3, 26, 3, 26, 3, 27, 3, 27, 7, 27, 209, 10, 27, 12, 27, 14, 27, 212, 11, 27, 3, 27, 3, 27, 3, 28, 3, 28, 3, 29, 3, 29, 3, 30, 3, 30, 3, 31, 3, 31, 3, 32, 3, 32, 3, 33, 3, 33, 3, 34, 3, 34, 3, 34, 2, 2, 35, 3, 3, 5, 4, 7, 5, 9, 6, 11, 7, 13, 8, 15, 9, 17, 10, 19, 11, 21, 12, 23, 13, 25, 14, 27, 15, 29, 16, 31, 17, 33, 18, 35, 19, 37, 20, 39, 21, 41, 22, 43, 23, 45, 24, 47, 25, 49, 26, 51, 27, 53, 28, 55, 29, 57, 30, 59, 31, 61, 32, 63, 33, 65, 34, 67, 35, 3, 2, 11, 3, 2, 47, 47, 3, 2, 50, 59, 5, 2, 11, 12, 15, 15, 34, 34, 5, 2, 67, 92, 97, 97, 99, 124, 6, 2, 50, 59, 67, 92, 97, 97, 99, 124, 6, 2, 47, 59, 67, 92, 97, 97, 99, 124, 7, 2, 47, 47, 49, 59, 67, 92, 97, 97, 99, 124, 5, 2, 12, 12, 15, 15, 41, 41, 5, 2, 12, 12, 15, 15, 36, 36, 2, 246, 2, 3, 3, 2, 2, 2, 2, 5, 3, 2, 2, 2, 2, 7, 3, 2, 2, 2, 2, 9, 3, 2, 2, 2, 2, 11, 3, 2, 2, 2, 2, 13, 3, 2, 2, 2, 2, 15, 3, 2, 2, 2, 2, 17, 3, 2, 2, 2, 2, 19, 3, 2, 2, 2, 2, 21, 3, 2, 2, 2, 2, 23, 3, 2, 2, 2, 2, 25, 3, 2, 2, 2, 2, 27, 3, 2, 2, 2, 2, 29, 3, 2, 2, 2, 2, 31, 3, 2, 2, 2, 2, 33, 3, 2, 2, 2, 2, 35, 3, 2, 2, 2, 2, 37, 3, 2, 2, 2, 2, 39, 3, 2, 2, 2, 2, 41, 3, 2, 2, 2, 2, 43, 3, 2, 2, 2, 2, 45, 3, 2, 2, 2, 2, 47, 3, 2, 2, 2, 2, 49, 3, 2, 2, 2, 2, 51, 3, 2, 2, 2, 2, 53, 3, 2, 2, 2, 2, 55, 3, 2, 2, 2, 2, 57, 3, 2, 2, 2, 2, 59, 3, 2, 2, 2, 2, 61, 3, 2, 2, 2, 2, 63, 3, 2, 2, 2, 2, 65, 3, 2, 2, 2, 2, 67, 3, 2, 2, 2, 3, 69, 3, 2, 2, 2, 5, 71, 3, 2, 2, 2, 7, 73, 3, 2, 2, 2, 9, 75, 3, 2, 2, 2, 11, 78, 3, 2, 2, 2, 13, 81, 3, 2, 2, 2, 15, 83, 3, 2, 2, 2, 17, 85, 3, 2, 2, 2, 19, 88, 3, 2, 2, 2, 21, 91, 3, 2, 2, 2, 23, 93, 3, 2, 2, 2, 25, 95, 3, 2, 2, 2, 27, 97, 3, 2, 2, 2, 29, 99, 3, 2, 2, 2, 31, 107, 3, 2, 2, 2, 33, 113, 3, 2, 2, 2, 35, 123, 3, 2, 2, 2, 37, 135, 3, 2, 2, 2, 39, 138, 3, 2, 2, 2, 41, 152, 3, 2, 2, 2, 43, 160, 3, 2, 2, 2, 45, 172, 3, 2, 2, 2, 47, 174, 3, 2, 2, 2, 49, 182, 3, 2, 2, 2, 51, 197, 3, 2, 2, 2, 53, 206, 3, 2, 2, 2, 55, 215, 3, 2, 2, 2, 57, 217, 3, 2, 2, 2, 59, 219, 3, 2, 2, 2, 61, 221, 3, 2, 2, 2, 63, 223, 3, 2, 2, 2, 65, 225, 3, 2, 2, 2, 67, 227, 3, 2, 2, 2, 69, 70, 7, 126, 2, 2, 70, 4, 3, 2, 2, 2, 71, 72, 7, 46, 2, 2, 72, 6, 3, 2, 2, 2, 73, 74, 7, 60, 2, 2, 74, 8, 3, 2, 2, 2, 75, 76, 7, 63, 2, 2, 76, 77, 7, 63, 2, 2, 77, 10, 3, 2, 2, 2, 78, 79, 7, 35, 2, 2, 79, 80, 7, 63, 2, 2, 80, 12, 3, 2, 2, 2, 81, 82, 7, 64, 2, 2, 82, 14, 3, 2, 2, 2, 83, 84, 7, 62, 2, 2, 84, 16, 3, 2, 2, 2, 85, 86, 7, 64, 2, 2, 86, 87, 7, 63, 2, 2, 87, 18, 3, 2, 2, 2, 88, 89, 7, 62, 2, 2, 89, 90, 7, 63, 2, 2, 90, 20, 3, 2, 2, 2, 91, 92, 7, 45, 2, 2, 92, 22, 3, 2, 2, 2, 93, 94, 7, 47, 2, 2, 94, 24, 3, 2, 2, 2, 95, 96, 7, 44, 2, 2, 96, 26, 3, 2, 2, 2, 97, 98, 7, 49, 2, 2, 98, 28, 3, 2, 2, 2, 99, 100, 7, 39, 2, 2, 100, 30, 3, 2, 2, 2, 101, 102, 7, 99, 2, 2, 102, 103, 7, 112, 2, 2, 103, 108, 7, 102, 2, 2, 104, 105, 7, 67, 2, 2, 105, 106, 7, 80, 2, 2, 106, 108, 7, 70, 2, 2, 107, 101, 3, 2, 2, 2, 107, 104, 3, 2, 2, 2, 108, 32, 3, 2, 2, 2, 109, 110, 7, 113, 2, 2, 110, 114, 7, 116, 2, 2, 111, 112, 7, 81, 2, 2, 112, 114, 7, 84, 2, 2, 113, 109, 3, 2, 2, 2, 113, 111, 3, 2, 2, 2, 114, 34, 3, 2, 2, 2, 115, 116, 7, 118, 2, 2, 116, 117, 7, 116, 2, 2, 117, 118, 7, 119, 2, 2, 118, 124, 7, 103, 2, 2, 119, 120, 7, 86, 2, 2, 120, 121, 7, 84, 2, 2, 121, 122, 7, 87, 2, 2, 122, 124, 7, 71, 2, 2, 123, 115, 3, 2, 2, 2, 123, 119, 3, 2, 2, 2, 
124, 36, 3, 2, 2, 2, 125, 126, 7, 104, 2, 2, 126, 127, 7, 99, 2, 2, 127, 128, 7, 110, 2, 2, 128, 129, 7, 117, 2, 2, 129, 136, 7, 103, 2, 2, 130, 131, 7, 72, 2, 2, 131, 132, 7, 67, 2, 2, 132, 133, 7, 78, 2, 2, 133, 134, 7, 85, 2, 2, 134, 136, 7, 71, 2, 2, 135, 125, 3, 2, 2, 2, 135, 130, 3, 2, 2, 2, 136, 38, 3, 2, 2, 2, 137, 139, 9, 2, 2, 2, 138, 137, 3, 2, 2, 2, 138, 139, 3, 2, 2, 2, 139, 141, 3, 2, 2, 2, 140, 142, 9, 3, 2, 2, 141, 140, 3, 2, 2, 2, 142, 143, 3, 2, 2, 2, 143, 141, 3, 2, 2, 2, 143, 144, 3, 2, 2, 2, 144, 145, 3, 2, 2, 2, 145, 147, 7, 48, 2, 2, 146, 148, 9, 3, 2, 2, 147, 146, 3, 2, 2, 2, 148, 149, 3, 2, 2, 2, 149, 147, 3, 2, 2, 2, 149, 150, 3, 2, 2, 2, 150, 40, 3, 2, 2, 2, 151, 153, 9, 2, 2, 2, 152, 151, 3, 2, 2, 2, 152, 153, 3, 2, 2, 2, 153, 155, 3, 2, 2, 2, 154, 156, 9, 3, 2, 2, 155, 154, 3, 2, 2, 2, 156, 157, 3, 2, 2, 2, 157, 155, 3, 2, 2, 2, 157, 158, 3, 2, 2, 2, 158, 42, 3, 2, 2, 2, 159, 161, 9, 4, 2, 2, 160, 159, 3, 2, 2, 2, 161, 162, 3, 2, 2, 2, 162, 160, 3, 2, 2, 2, 162, 163, 3, 2, 2, 2, 163, 164, 3, 2, 2, 2, 164, 165, 8, 22, 2, 2, 165, 44, 3, 2, 2, 2, 166, 167, 7, 80, 2, 2, 167, 168, 7, 81, 2, 2, 168, 173, 7, 86, 2, 2, 169, 170, 7, 112, 2, 2, 170, 171, 7, 113, 2, 2, 171, 173, 7, 118, 2, 2, 172, 166, 3, 2, 2, 2, 172, 169, 3, 2, 2, 2, 173, 46, 3, 2, 2, 2, 174, 178, 9, 5, 2, 2, 175, 177, 9, 6, 2, 2, 176, 175, 3, 2, 2, 2, 177, 180, 3, 2, 2, 2, 178, 176, 3, 2, 2, 2, 178, 179, 3, 2, 2, 2, 179, 48, 3, 2, 2, 2, 180, 178, 3, 2, 2, 2, 181, 183, 9, 7, 2, 2, 182, 181, 3, 2, 2, 2, 183, 184, 3, 2, 2, 2, 184, 182, 3, 2, 2, 2, 184, 185, 3, 2, 2, 2, 185, 194, 3, 2, 2, 2, 186, 188, 7, 48, 2, 2, 187, 189, 9, 8, 2, 2, 188, 187, 3, 2, 2, 2, 189, 190, 3, 2, 2, 2, 190, 188, 3, 2, 2, 2, 190, 191, 3, 2, 2, 2, 191, 193, 3, 2, 2, 2, 192, 186, 3, 2, 2, 2, 193, 196, 3, 2, 2, 2, 194, 192, 3, 2, 2, 2, 194, 195, 3, 2, 2, 2, 195, 50, 3, 2, 2, 2, 196, 194, 3, 2, 2, 2, 197, 201, 7, 41, 2, 2, 198, 200, 10, 9, 2, 2, 199, 198, 3, 2, 2, 2, 200, 203, 3, 2, 2, 2, 201, 199, 3, 2, 2, 2, 201, 202, 3, 2, 2, 2, 202, 204, 3, 2, 2, 2, 203, 201, 3, 2, 2, 2, 204, 205, 7, 41, 2, 2, 205, 52, 3, 2, 2, 2, 206, 210, 7, 36, 2, 2, 207, 209, 10, 10, 2, 2, 208, 207, 3, 2, 2, 2, 209, 212, 3, 2, 2, 2, 210, 208, 3, 2, 2, 2, 210, 211, 3, 2, 2, 2, 211, 213, 3, 2, 2, 2, 212, 210, 3, 2, 2, 2, 213, 214, 7, 36, 2, 2, 214, 54, 3, 2, 2, 2, 215, 216, 7, 42, 2, 2, 216, 56, 3, 2, 2, 2, 217, 218, 7, 43, 2, 2, 218, 58, 3, 2, 2, 2, 219, 220, 7, 93, 2, 2, 220, 60, 3, 2, 2, 2, 221, 222, 7, 95, 2, 2, 222, 62, 3, 2, 2, 2, 223, 224, 7, 125, 2, 2, 224, 64, 3, 2, 2, 2, 225, 226, 7, 127, 2, 2, 226, 66, 3, 2, 2, 2, 227, 228, 7, 38, 2, 2, 228, 229, 7, 125, 2, 2, 229, 68, 3, 2, 2, 2, 20, 2, 107, 113, 123, 135, 138, 143, 149, 152, 157, 162, 172, 178, 184, 190, 194, 201, 210, 3, 8, 2, 2] \ No newline at end of file diff --git a/internal/pkg/eql/parser/eql_lexer.go b/internal/pkg/eql/parser/eql_lexer.go index d817439a858..b8eb1eeed6d 100644 --- a/internal/pkg/eql/parser/eql_lexer.go +++ b/internal/pkg/eql/parser/eql_lexer.go @@ -48,83 +48,83 @@ var serializedLexerAtn = []uint16{ 20, 39, 21, 41, 22, 43, 23, 45, 24, 47, 25, 49, 26, 51, 27, 53, 28, 55, 29, 57, 30, 59, 31, 61, 32, 63, 33, 65, 34, 67, 35, 3, 2, 11, 3, 2, 47, 47, 3, 2, 50, 59, 5, 2, 11, 12, 15, 15, 34, 34, 5, 2, 67, 92, 97, 97, 99, - 124, 6, 2, 50, 59, 67, 92, 97, 97, 99, 124, 7, 2, 47, 48, 50, 59, 67, 92, - 97, 97, 99, 124, 7, 2, 47, 47, 50, 59, 67, 92, 97, 97, 99, 124, 5, 2, 12, - 12, 15, 15, 41, 41, 5, 2, 12, 12, 15, 15, 36, 36, 2, 246, 2, 3, 3, 2, 2, - 2, 2, 5, 3, 2, 2, 2, 2, 7, 3, 2, 2, 2, 
2, 9, 3, 2, 2, 2, 2, 11, 3, 2, 2, - 2, 2, 13, 3, 2, 2, 2, 2, 15, 3, 2, 2, 2, 2, 17, 3, 2, 2, 2, 2, 19, 3, 2, - 2, 2, 2, 21, 3, 2, 2, 2, 2, 23, 3, 2, 2, 2, 2, 25, 3, 2, 2, 2, 2, 27, 3, - 2, 2, 2, 2, 29, 3, 2, 2, 2, 2, 31, 3, 2, 2, 2, 2, 33, 3, 2, 2, 2, 2, 35, - 3, 2, 2, 2, 2, 37, 3, 2, 2, 2, 2, 39, 3, 2, 2, 2, 2, 41, 3, 2, 2, 2, 2, - 43, 3, 2, 2, 2, 2, 45, 3, 2, 2, 2, 2, 47, 3, 2, 2, 2, 2, 49, 3, 2, 2, 2, - 2, 51, 3, 2, 2, 2, 2, 53, 3, 2, 2, 2, 2, 55, 3, 2, 2, 2, 2, 57, 3, 2, 2, - 2, 2, 59, 3, 2, 2, 2, 2, 61, 3, 2, 2, 2, 2, 63, 3, 2, 2, 2, 2, 65, 3, 2, - 2, 2, 2, 67, 3, 2, 2, 2, 3, 69, 3, 2, 2, 2, 5, 71, 3, 2, 2, 2, 7, 73, 3, - 2, 2, 2, 9, 75, 3, 2, 2, 2, 11, 78, 3, 2, 2, 2, 13, 81, 3, 2, 2, 2, 15, - 83, 3, 2, 2, 2, 17, 85, 3, 2, 2, 2, 19, 88, 3, 2, 2, 2, 21, 91, 3, 2, 2, - 2, 23, 93, 3, 2, 2, 2, 25, 95, 3, 2, 2, 2, 27, 97, 3, 2, 2, 2, 29, 99, - 3, 2, 2, 2, 31, 107, 3, 2, 2, 2, 33, 113, 3, 2, 2, 2, 35, 123, 3, 2, 2, - 2, 37, 135, 3, 2, 2, 2, 39, 138, 3, 2, 2, 2, 41, 152, 3, 2, 2, 2, 43, 160, - 3, 2, 2, 2, 45, 172, 3, 2, 2, 2, 47, 174, 3, 2, 2, 2, 49, 182, 3, 2, 2, - 2, 51, 197, 3, 2, 2, 2, 53, 206, 3, 2, 2, 2, 55, 215, 3, 2, 2, 2, 57, 217, - 3, 2, 2, 2, 59, 219, 3, 2, 2, 2, 61, 221, 3, 2, 2, 2, 63, 223, 3, 2, 2, - 2, 65, 225, 3, 2, 2, 2, 67, 227, 3, 2, 2, 2, 69, 70, 7, 126, 2, 2, 70, - 4, 3, 2, 2, 2, 71, 72, 7, 46, 2, 2, 72, 6, 3, 2, 2, 2, 73, 74, 7, 60, 2, - 2, 74, 8, 3, 2, 2, 2, 75, 76, 7, 63, 2, 2, 76, 77, 7, 63, 2, 2, 77, 10, - 3, 2, 2, 2, 78, 79, 7, 35, 2, 2, 79, 80, 7, 63, 2, 2, 80, 12, 3, 2, 2, - 2, 81, 82, 7, 64, 2, 2, 82, 14, 3, 2, 2, 2, 83, 84, 7, 62, 2, 2, 84, 16, - 3, 2, 2, 2, 85, 86, 7, 64, 2, 2, 86, 87, 7, 63, 2, 2, 87, 18, 3, 2, 2, - 2, 88, 89, 7, 62, 2, 2, 89, 90, 7, 63, 2, 2, 90, 20, 3, 2, 2, 2, 91, 92, - 7, 45, 2, 2, 92, 22, 3, 2, 2, 2, 93, 94, 7, 47, 2, 2, 94, 24, 3, 2, 2, - 2, 95, 96, 7, 44, 2, 2, 96, 26, 3, 2, 2, 2, 97, 98, 7, 49, 2, 2, 98, 28, - 3, 2, 2, 2, 99, 100, 7, 39, 2, 2, 100, 30, 3, 2, 2, 2, 101, 102, 7, 99, - 2, 2, 102, 103, 7, 112, 2, 2, 103, 108, 7, 102, 2, 2, 104, 105, 7, 67, - 2, 2, 105, 106, 7, 80, 2, 2, 106, 108, 7, 70, 2, 2, 107, 101, 3, 2, 2, - 2, 107, 104, 3, 2, 2, 2, 108, 32, 3, 2, 2, 2, 109, 110, 7, 113, 2, 2, 110, - 114, 7, 116, 2, 2, 111, 112, 7, 81, 2, 2, 112, 114, 7, 84, 2, 2, 113, 109, - 3, 2, 2, 2, 113, 111, 3, 2, 2, 2, 114, 34, 3, 2, 2, 2, 115, 116, 7, 118, - 2, 2, 116, 117, 7, 116, 2, 2, 117, 118, 7, 119, 2, 2, 118, 124, 7, 103, - 2, 2, 119, 120, 7, 86, 2, 2, 120, 121, 7, 84, 2, 2, 121, 122, 7, 87, 2, - 2, 122, 124, 7, 71, 2, 2, 123, 115, 3, 2, 2, 2, 123, 119, 3, 2, 2, 2, 124, - 36, 3, 2, 2, 2, 125, 126, 7, 104, 2, 2, 126, 127, 7, 99, 2, 2, 127, 128, - 7, 110, 2, 2, 128, 129, 7, 117, 2, 2, 129, 136, 7, 103, 2, 2, 130, 131, - 7, 72, 2, 2, 131, 132, 7, 67, 2, 2, 132, 133, 7, 78, 2, 2, 133, 134, 7, - 85, 2, 2, 134, 136, 7, 71, 2, 2, 135, 125, 3, 2, 2, 2, 135, 130, 3, 2, - 2, 2, 136, 38, 3, 2, 2, 2, 137, 139, 9, 2, 2, 2, 138, 137, 3, 2, 2, 2, - 138, 139, 3, 2, 2, 2, 139, 141, 3, 2, 2, 2, 140, 142, 9, 3, 2, 2, 141, - 140, 3, 2, 2, 2, 142, 143, 3, 2, 2, 2, 143, 141, 3, 2, 2, 2, 143, 144, - 3, 2, 2, 2, 144, 145, 3, 2, 2, 2, 145, 147, 7, 48, 2, 2, 146, 148, 9, 3, - 2, 2, 147, 146, 3, 2, 2, 2, 148, 149, 3, 2, 2, 2, 149, 147, 3, 2, 2, 2, - 149, 150, 3, 2, 2, 2, 150, 40, 3, 2, 2, 2, 151, 153, 9, 2, 2, 2, 152, 151, - 3, 2, 2, 2, 152, 153, 3, 2, 2, 2, 153, 155, 3, 2, 2, 2, 154, 156, 9, 3, - 2, 2, 155, 154, 3, 2, 2, 2, 156, 157, 3, 2, 2, 2, 157, 155, 3, 2, 2, 2, - 157, 158, 3, 2, 2, 2, 158, 42, 3, 2, 2, 2, 159, 161, 9, 4, 2, 2, 160, 159, - 3, 2, 2, 2, 
161, 162, 3, 2, 2, 2, 162, 160, 3, 2, 2, 2, 162, 163, 3, 2, - 2, 2, 163, 164, 3, 2, 2, 2, 164, 165, 8, 22, 2, 2, 165, 44, 3, 2, 2, 2, - 166, 167, 7, 80, 2, 2, 167, 168, 7, 81, 2, 2, 168, 173, 7, 86, 2, 2, 169, - 170, 7, 112, 2, 2, 170, 171, 7, 113, 2, 2, 171, 173, 7, 118, 2, 2, 172, - 166, 3, 2, 2, 2, 172, 169, 3, 2, 2, 2, 173, 46, 3, 2, 2, 2, 174, 178, 9, - 5, 2, 2, 175, 177, 9, 6, 2, 2, 176, 175, 3, 2, 2, 2, 177, 180, 3, 2, 2, - 2, 178, 176, 3, 2, 2, 2, 178, 179, 3, 2, 2, 2, 179, 48, 3, 2, 2, 2, 180, - 178, 3, 2, 2, 2, 181, 183, 9, 7, 2, 2, 182, 181, 3, 2, 2, 2, 183, 184, - 3, 2, 2, 2, 184, 182, 3, 2, 2, 2, 184, 185, 3, 2, 2, 2, 185, 194, 3, 2, - 2, 2, 186, 188, 7, 48, 2, 2, 187, 189, 9, 8, 2, 2, 188, 187, 3, 2, 2, 2, - 189, 190, 3, 2, 2, 2, 190, 188, 3, 2, 2, 2, 190, 191, 3, 2, 2, 2, 191, - 193, 3, 2, 2, 2, 192, 186, 3, 2, 2, 2, 193, 196, 3, 2, 2, 2, 194, 192, - 3, 2, 2, 2, 194, 195, 3, 2, 2, 2, 195, 50, 3, 2, 2, 2, 196, 194, 3, 2, - 2, 2, 197, 201, 7, 41, 2, 2, 198, 200, 10, 9, 2, 2, 199, 198, 3, 2, 2, - 2, 200, 203, 3, 2, 2, 2, 201, 199, 3, 2, 2, 2, 201, 202, 3, 2, 2, 2, 202, - 204, 3, 2, 2, 2, 203, 201, 3, 2, 2, 2, 204, 205, 7, 41, 2, 2, 205, 52, - 3, 2, 2, 2, 206, 210, 7, 36, 2, 2, 207, 209, 10, 10, 2, 2, 208, 207, 3, - 2, 2, 2, 209, 212, 3, 2, 2, 2, 210, 208, 3, 2, 2, 2, 210, 211, 3, 2, 2, - 2, 211, 213, 3, 2, 2, 2, 212, 210, 3, 2, 2, 2, 213, 214, 7, 36, 2, 2, 214, - 54, 3, 2, 2, 2, 215, 216, 7, 42, 2, 2, 216, 56, 3, 2, 2, 2, 217, 218, 7, - 43, 2, 2, 218, 58, 3, 2, 2, 2, 219, 220, 7, 93, 2, 2, 220, 60, 3, 2, 2, - 2, 221, 222, 7, 95, 2, 2, 222, 62, 3, 2, 2, 2, 223, 224, 7, 125, 2, 2, - 224, 64, 3, 2, 2, 2, 225, 226, 7, 127, 2, 2, 226, 66, 3, 2, 2, 2, 227, - 228, 7, 38, 2, 2, 228, 229, 7, 125, 2, 2, 229, 68, 3, 2, 2, 2, 20, 2, 107, - 113, 123, 135, 138, 143, 149, 152, 157, 162, 172, 178, 184, 190, 194, 201, - 210, 3, 8, 2, 2, + 124, 6, 2, 50, 59, 67, 92, 97, 97, 99, 124, 6, 2, 47, 59, 67, 92, 97, 97, + 99, 124, 7, 2, 47, 47, 49, 59, 67, 92, 97, 97, 99, 124, 5, 2, 12, 12, 15, + 15, 41, 41, 5, 2, 12, 12, 15, 15, 36, 36, 2, 246, 2, 3, 3, 2, 2, 2, 2, + 5, 3, 2, 2, 2, 2, 7, 3, 2, 2, 2, 2, 9, 3, 2, 2, 2, 2, 11, 3, 2, 2, 2, 2, + 13, 3, 2, 2, 2, 2, 15, 3, 2, 2, 2, 2, 17, 3, 2, 2, 2, 2, 19, 3, 2, 2, 2, + 2, 21, 3, 2, 2, 2, 2, 23, 3, 2, 2, 2, 2, 25, 3, 2, 2, 2, 2, 27, 3, 2, 2, + 2, 2, 29, 3, 2, 2, 2, 2, 31, 3, 2, 2, 2, 2, 33, 3, 2, 2, 2, 2, 35, 3, 2, + 2, 2, 2, 37, 3, 2, 2, 2, 2, 39, 3, 2, 2, 2, 2, 41, 3, 2, 2, 2, 2, 43, 3, + 2, 2, 2, 2, 45, 3, 2, 2, 2, 2, 47, 3, 2, 2, 2, 2, 49, 3, 2, 2, 2, 2, 51, + 3, 2, 2, 2, 2, 53, 3, 2, 2, 2, 2, 55, 3, 2, 2, 2, 2, 57, 3, 2, 2, 2, 2, + 59, 3, 2, 2, 2, 2, 61, 3, 2, 2, 2, 2, 63, 3, 2, 2, 2, 2, 65, 3, 2, 2, 2, + 2, 67, 3, 2, 2, 2, 3, 69, 3, 2, 2, 2, 5, 71, 3, 2, 2, 2, 7, 73, 3, 2, 2, + 2, 9, 75, 3, 2, 2, 2, 11, 78, 3, 2, 2, 2, 13, 81, 3, 2, 2, 2, 15, 83, 3, + 2, 2, 2, 17, 85, 3, 2, 2, 2, 19, 88, 3, 2, 2, 2, 21, 91, 3, 2, 2, 2, 23, + 93, 3, 2, 2, 2, 25, 95, 3, 2, 2, 2, 27, 97, 3, 2, 2, 2, 29, 99, 3, 2, 2, + 2, 31, 107, 3, 2, 2, 2, 33, 113, 3, 2, 2, 2, 35, 123, 3, 2, 2, 2, 37, 135, + 3, 2, 2, 2, 39, 138, 3, 2, 2, 2, 41, 152, 3, 2, 2, 2, 43, 160, 3, 2, 2, + 2, 45, 172, 3, 2, 2, 2, 47, 174, 3, 2, 2, 2, 49, 182, 3, 2, 2, 2, 51, 197, + 3, 2, 2, 2, 53, 206, 3, 2, 2, 2, 55, 215, 3, 2, 2, 2, 57, 217, 3, 2, 2, + 2, 59, 219, 3, 2, 2, 2, 61, 221, 3, 2, 2, 2, 63, 223, 3, 2, 2, 2, 65, 225, + 3, 2, 2, 2, 67, 227, 3, 2, 2, 2, 69, 70, 7, 126, 2, 2, 70, 4, 3, 2, 2, + 2, 71, 72, 7, 46, 2, 2, 72, 6, 3, 2, 2, 2, 73, 74, 7, 60, 2, 2, 74, 8, + 3, 2, 2, 2, 75, 76, 7, 63, 2, 2, 76, 
77, 7, 63, 2, 2, 77, 10, 3, 2, 2, + 2, 78, 79, 7, 35, 2, 2, 79, 80, 7, 63, 2, 2, 80, 12, 3, 2, 2, 2, 81, 82, + 7, 64, 2, 2, 82, 14, 3, 2, 2, 2, 83, 84, 7, 62, 2, 2, 84, 16, 3, 2, 2, + 2, 85, 86, 7, 64, 2, 2, 86, 87, 7, 63, 2, 2, 87, 18, 3, 2, 2, 2, 88, 89, + 7, 62, 2, 2, 89, 90, 7, 63, 2, 2, 90, 20, 3, 2, 2, 2, 91, 92, 7, 45, 2, + 2, 92, 22, 3, 2, 2, 2, 93, 94, 7, 47, 2, 2, 94, 24, 3, 2, 2, 2, 95, 96, + 7, 44, 2, 2, 96, 26, 3, 2, 2, 2, 97, 98, 7, 49, 2, 2, 98, 28, 3, 2, 2, + 2, 99, 100, 7, 39, 2, 2, 100, 30, 3, 2, 2, 2, 101, 102, 7, 99, 2, 2, 102, + 103, 7, 112, 2, 2, 103, 108, 7, 102, 2, 2, 104, 105, 7, 67, 2, 2, 105, + 106, 7, 80, 2, 2, 106, 108, 7, 70, 2, 2, 107, 101, 3, 2, 2, 2, 107, 104, + 3, 2, 2, 2, 108, 32, 3, 2, 2, 2, 109, 110, 7, 113, 2, 2, 110, 114, 7, 116, + 2, 2, 111, 112, 7, 81, 2, 2, 112, 114, 7, 84, 2, 2, 113, 109, 3, 2, 2, + 2, 113, 111, 3, 2, 2, 2, 114, 34, 3, 2, 2, 2, 115, 116, 7, 118, 2, 2, 116, + 117, 7, 116, 2, 2, 117, 118, 7, 119, 2, 2, 118, 124, 7, 103, 2, 2, 119, + 120, 7, 86, 2, 2, 120, 121, 7, 84, 2, 2, 121, 122, 7, 87, 2, 2, 122, 124, + 7, 71, 2, 2, 123, 115, 3, 2, 2, 2, 123, 119, 3, 2, 2, 2, 124, 36, 3, 2, + 2, 2, 125, 126, 7, 104, 2, 2, 126, 127, 7, 99, 2, 2, 127, 128, 7, 110, + 2, 2, 128, 129, 7, 117, 2, 2, 129, 136, 7, 103, 2, 2, 130, 131, 7, 72, + 2, 2, 131, 132, 7, 67, 2, 2, 132, 133, 7, 78, 2, 2, 133, 134, 7, 85, 2, + 2, 134, 136, 7, 71, 2, 2, 135, 125, 3, 2, 2, 2, 135, 130, 3, 2, 2, 2, 136, + 38, 3, 2, 2, 2, 137, 139, 9, 2, 2, 2, 138, 137, 3, 2, 2, 2, 138, 139, 3, + 2, 2, 2, 139, 141, 3, 2, 2, 2, 140, 142, 9, 3, 2, 2, 141, 140, 3, 2, 2, + 2, 142, 143, 3, 2, 2, 2, 143, 141, 3, 2, 2, 2, 143, 144, 3, 2, 2, 2, 144, + 145, 3, 2, 2, 2, 145, 147, 7, 48, 2, 2, 146, 148, 9, 3, 2, 2, 147, 146, + 3, 2, 2, 2, 148, 149, 3, 2, 2, 2, 149, 147, 3, 2, 2, 2, 149, 150, 3, 2, + 2, 2, 150, 40, 3, 2, 2, 2, 151, 153, 9, 2, 2, 2, 152, 151, 3, 2, 2, 2, + 152, 153, 3, 2, 2, 2, 153, 155, 3, 2, 2, 2, 154, 156, 9, 3, 2, 2, 155, + 154, 3, 2, 2, 2, 156, 157, 3, 2, 2, 2, 157, 155, 3, 2, 2, 2, 157, 158, + 3, 2, 2, 2, 158, 42, 3, 2, 2, 2, 159, 161, 9, 4, 2, 2, 160, 159, 3, 2, + 2, 2, 161, 162, 3, 2, 2, 2, 162, 160, 3, 2, 2, 2, 162, 163, 3, 2, 2, 2, + 163, 164, 3, 2, 2, 2, 164, 165, 8, 22, 2, 2, 165, 44, 3, 2, 2, 2, 166, + 167, 7, 80, 2, 2, 167, 168, 7, 81, 2, 2, 168, 173, 7, 86, 2, 2, 169, 170, + 7, 112, 2, 2, 170, 171, 7, 113, 2, 2, 171, 173, 7, 118, 2, 2, 172, 166, + 3, 2, 2, 2, 172, 169, 3, 2, 2, 2, 173, 46, 3, 2, 2, 2, 174, 178, 9, 5, + 2, 2, 175, 177, 9, 6, 2, 2, 176, 175, 3, 2, 2, 2, 177, 180, 3, 2, 2, 2, + 178, 176, 3, 2, 2, 2, 178, 179, 3, 2, 2, 2, 179, 48, 3, 2, 2, 2, 180, 178, + 3, 2, 2, 2, 181, 183, 9, 7, 2, 2, 182, 181, 3, 2, 2, 2, 183, 184, 3, 2, + 2, 2, 184, 182, 3, 2, 2, 2, 184, 185, 3, 2, 2, 2, 185, 194, 3, 2, 2, 2, + 186, 188, 7, 48, 2, 2, 187, 189, 9, 8, 2, 2, 188, 187, 3, 2, 2, 2, 189, + 190, 3, 2, 2, 2, 190, 188, 3, 2, 2, 2, 190, 191, 3, 2, 2, 2, 191, 193, + 3, 2, 2, 2, 192, 186, 3, 2, 2, 2, 193, 196, 3, 2, 2, 2, 194, 192, 3, 2, + 2, 2, 194, 195, 3, 2, 2, 2, 195, 50, 3, 2, 2, 2, 196, 194, 3, 2, 2, 2, + 197, 201, 7, 41, 2, 2, 198, 200, 10, 9, 2, 2, 199, 198, 3, 2, 2, 2, 200, + 203, 3, 2, 2, 2, 201, 199, 3, 2, 2, 2, 201, 202, 3, 2, 2, 2, 202, 204, + 3, 2, 2, 2, 203, 201, 3, 2, 2, 2, 204, 205, 7, 41, 2, 2, 205, 52, 3, 2, + 2, 2, 206, 210, 7, 36, 2, 2, 207, 209, 10, 10, 2, 2, 208, 207, 3, 2, 2, + 2, 209, 212, 3, 2, 2, 2, 210, 208, 3, 2, 2, 2, 210, 211, 3, 2, 2, 2, 211, + 213, 3, 2, 2, 2, 212, 210, 3, 2, 2, 2, 213, 214, 7, 36, 2, 2, 214, 54, + 3, 2, 2, 2, 215, 216, 7, 42, 
2, 2, 216, 56, 3, 2, 2, 2, 217, 218, 7, 43,
+	2, 2, 218, 58, 3, 2, 2, 2, 219, 220, 7, 93, 2, 2, 220, 60, 3, 2, 2, 2,
+	221, 222, 7, 95, 2, 2, 222, 62, 3, 2, 2, 2, 223, 224, 7, 125, 2, 2, 224,
+	64, 3, 2, 2, 2, 225, 226, 7, 127, 2, 2, 226, 66, 3, 2, 2, 2, 227, 228,
+	7, 38, 2, 2, 228, 229, 7, 125, 2, 2, 229, 68, 3, 2, 2, 2, 20, 2, 107, 113,
+	123, 135, 138, 143, 149, 152, 157, 162, 172, 178, 184, 190, 194, 201, 210,
+	3, 8, 2, 2,
 }

 var lexerDeserializer = antlr.NewATNDeserializer(nil)

From 8ef98f102abf196903ff08f394797cd7888f5ac2 Mon Sep 17 00:00:00 2001
From: Aleksandr Maus
Date: Tue, 19 Jul 2022 16:04:38 -0400
Subject: [PATCH 052/180] Fix Elastic Agent non-fleet broken upgrade between
 8.3.x releases (#701)

* Fix Elastic Agent non-fleet broken upgrade between 8.3.x releases

* Migrate the vault directory on linux and windows to the top directory of the agent, so it can be shared without needing the upgrade handler call, for example with side-by-side install/upgrade from .rpm/.deb

* Extend the vault to allow read-only open, useful when the vault at a particular location needs to be only read, not created.

* Correct the typo in the log messages

* Update the lint-flagged function comment with 'unused'; it was flagged with 'deadcode' on the previous run

* Address code review feedback

* Add missing import for linux utz

* Change the vault path from Top() to Config(); this is a better location, next to fleet.enc, based on the install/upgrade testing with .rpm/.deb installs

* Fix the missing state migration for the .rpm/.deb upgrade. The post-install script now performs the migration and creates the symlink after that.

* Fix typo in the postinstall script

* Update the vault migration code, add the agent configuration match check with the agent secret
---
 dev-tools/packaging/packages.yml              |  10 +-
 .../templates/linux/postinstall.sh.tmpl       |  38 ++
 .../agent/application/paths/paths_linux.go    |   2 +-
 .../agent/application/paths/paths_windows.go  |   2 +-
 .../pkg/agent/application/secret/secret.go    |  41 +-
 .../pkg/agent/application/upgrade/upgrade.go  |  37 --
 internal/pkg/agent/cmd/run.go                 |  17 +
 .../pkg/agent/migration/migrate_secret.go     | 163 ++++++++
 .../agent/migration/migrate_secret_test.go    | 386 ++++++++++++++++++
 internal/pkg/agent/vault/seed.go              |  27 ++
 internal/pkg/agent/vault/seed_test.go         |  43 +-
 internal/pkg/agent/vault/vault_darwin.go      |   2 +-
 internal/pkg/agent/vault/vault_linux.go       |  24 +-
 internal/pkg/agent/vault/vault_options.go     |  28 ++
 internal/pkg/agent/vault/vault_windows.go     |  29 +-
 15 files changed, 772 insertions(+), 77 deletions(-)
 create mode 100644 dev-tools/packaging/templates/linux/postinstall.sh.tmpl
 create mode 100644 internal/pkg/agent/migration/migrate_secret.go
 create mode 100644 internal/pkg/agent/migration/migrate_secret_test.go
 create mode 100644 internal/pkg/agent/vault/vault_options.go

diff --git a/dev-tools/packaging/packages.yml b/dev-tools/packaging/packages.yml
index acc89420081..bd5e9d1722c 100644
--- a/dev-tools/packaging/packages.yml
+++ b/dev-tools/packaging/packages.yml
@@ -20,11 +20,8 @@ shared:
   # Deb/RPM spec for community beats.
  - &deb_rpm_agent_spec
      <<: *common
-     post_install_script: '{{ elastic_beats_dir }}/dev-tools/packaging/files/linux/systemd-daemon-reload.sh'
+     post_install_script: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/linux/postinstall.sh.tmpl'
      files:
-       /usr/share/{{.BeatName}}/bin/{{.BeatName}}{{.BinaryExt}}:
-         source: build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}}
-         mode: 0755
        /usr/share/{{.BeatName}}/LICENSE.txt:
          source: '{{ repo.RootDir }}/LICENSE.txt'
          mode: 0644
@@ -1083,11 +1080,6 @@ specs:
      spec:
        <<: *deb_rpm_agent_spec
        <<: *elastic_license_for_deb_rpm
-       files:
-         /usr/share/{{.BeatName}}/bin/{{.BeatName}}{{.BinaryExt}}:
-           source: /var/lib/{{.BeatName}}/data/{{.BeatName}}-{{ commit_short }}/{{.BeatName}}{{.BinaryExt}}
-           symlink: true
-           mode: 0755

   - os: linux
     arch: amd64
diff --git a/dev-tools/packaging/templates/linux/postinstall.sh.tmpl b/dev-tools/packaging/templates/linux/postinstall.sh.tmpl
new file mode 100644
index 00000000000..083ebb91060
--- /dev/null
+++ b/dev-tools/packaging/templates/linux/postinstall.sh.tmpl
@@ -0,0 +1,38 @@
+#!/usr/bin/env bash
+
+set -e
+
+symlink="/usr/share/elastic-agent/bin/elastic-agent"
+old_agent_dir="$( dirname "$(readlink -f -- "$symlink")" )"
+
+commit_hash="{{ commit_short }}"
+
+yml_path="$old_agent_dir/state.yml"
+enc_path="$old_agent_dir/state.enc"
+
+new_agent_dir="$( dirname "$old_agent_dir")/elastic-agent-$commit_hash"
+
+if ! [[ "$old_agent_dir" -ef "$new_agent_dir" ]]; then
+    echo "migrate state from $old_agent_dir to $new_agent_dir"
+
+    if test -f "$yml_path"; then
+        echo "found $yml_path, copy to $new_agent_dir."
+        cp "$yml_path" "$new_agent_dir"
+    fi
+
+    if test -f "$enc_path"; then
+        echo "found $enc_path, copy to $new_agent_dir."
+        cp "$enc_path" "$new_agent_dir"
+    fi
+
+    if test -f "$symlink"; then
+        echo "found symlink $symlink, unlink"
+        unlink "$symlink"
+    fi
+
+    echo "create symlink $symlink to $new_agent_dir/elastic-agent"
+    ln -s "$new_agent_dir/elastic-agent" "$symlink"
+fi
+
+systemctl daemon-reload 2> /dev/null
+exit 0
diff --git a/internal/pkg/agent/application/paths/paths_linux.go b/internal/pkg/agent/application/paths/paths_linux.go
index 22faeb5f75a..37cc57c33af 100644
--- a/internal/pkg/agent/application/paths/paths_linux.go
+++ b/internal/pkg/agent/application/paths/paths_linux.go
@@ -14,5 +14,5 @@ const defaultAgentVaultPath = "vault"
 // AgentVaultPath is the directory that contains all the files for the vault
 func AgentVaultPath() string {
-	return filepath.Join(Home(), defaultAgentVaultPath)
+	return filepath.Join(Config(), defaultAgentVaultPath)
 }
diff --git a/internal/pkg/agent/application/paths/paths_windows.go b/internal/pkg/agent/application/paths/paths_windows.go
index 2fc6fd008a0..0b81aa2061b 100644
--- a/internal/pkg/agent/application/paths/paths_windows.go
+++ b/internal/pkg/agent/application/paths/paths_windows.go
@@ -42,5 +42,5 @@ func ArePathsEqual(expected, actual string) bool {
 // AgentVaultPath is the directory that contains all the files for the vault
 func AgentVaultPath() string {
-	return filepath.Join(Home(), defaultAgentVaultPath)
+	return filepath.Join(Config(), defaultAgentVaultPath)
 }
diff --git a/internal/pkg/agent/application/secret/secret.go b/internal/pkg/agent/application/secret/secret.go
index edce9eda174..cf690bf24e8 100644
--- a/internal/pkg/agent/application/secret/secret.go
+++ b/internal/pkg/agent/application/secret/secret.go
@@ -6,6 +6,7 @@ package secret

 import (
 	"encoding/json"
+	"fmt"
 	"runtime"
 	"sync"
 	"time"
@@ -52,7 +53,7 @@ func
Create(key string, opts ...OptionFunc) error {
 	options := applyOptions(opts...)
 	v, err := vault.New(options.vaultPath)
 	if err != nil {
-		return err
+		return fmt.Errorf("could not create new vault: %w", err)
 	}
 	defer v.Close()

@@ -80,12 +81,7 @@ func Create(key string, opts ...OptionFunc) error {
 		CreatedOn: time.Now().UTC(),
 	}

-	b, err := json.Marshal(secret)
-	if err != nil {
-		return err
-	}
-
-	return v.Set(key, b)
+	return set(v, key, secret)
 }

 // GetAgentSecret reads the agent secret from the vault
@@ -93,10 +89,17 @@ func GetAgentSecret(opts ...OptionFunc) (secret Secret, err error) {
 	return Get(agentSecretKey, opts...)
 }

+// SetAgentSecret saves the agent secret to the vault
+// This is needed for the migration from 8.3.0-8.3.2 to higher versions
+func SetAgentSecret(secret Secret, opts ...OptionFunc) error {
+	return Set(agentSecretKey, secret, opts...)
+}
+
 // Get reads the secret key from the vault
 func Get(key string, opts ...OptionFunc) (secret Secret, err error) {
 	options := applyOptions(opts...)
-	v, err := vault.New(options.vaultPath)
+	// open the vault readonly; this will not create the vault directory or the seed if it was not created before
+	v, err := vault.New(options.vaultPath, vault.WithReadonly(true))
 	if err != nil {
 		return secret, err
 	}
@@ -111,12 +114,32 @@ func Get(key string, opts ...OptionFunc) (secret Secret, err error) {
 	return secret, err
 }

+// Set saves the secret key to the vault
+func Set(key string, secret Secret, opts ...OptionFunc) error {
+	options := applyOptions(opts...)
+	v, err := vault.New(options.vaultPath)
+	if err != nil {
+		return fmt.Errorf("could not create new vault: %w", err)
+	}
+	defer v.Close()
+	return set(v, key, secret)
+}
+
+func set(v *vault.Vault, key string, secret Secret) error {
+	b, err := json.Marshal(secret)
+	if err != nil {
+		return fmt.Errorf("could not marshal secret: %w", err)
+	}
+
+	return v.Set(key, b)
+}
+
 // Remove removes the secret key from the vault
 func Remove(key string, opts ...OptionFunc) error {
 	options := applyOptions(opts...)
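+	// note: without vault.WithReadonly(true), vault.New opens read-write and creates the vault directory and seed if they are missing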
 	v, err := vault.New(options.vaultPath)
 	if err != nil {
-		return err
+		return fmt.Errorf("could not create new vault: %w", err)
 	}
 	defer v.Close()

diff --git a/internal/pkg/agent/application/upgrade/upgrade.go b/internal/pkg/agent/application/upgrade/upgrade.go
index 9d67165d0eb..1d370cb5301 100644
--- a/internal/pkg/agent/application/upgrade/upgrade.go
+++ b/internal/pkg/agent/application/upgrade/upgrade.go
@@ -10,7 +10,6 @@ import (
 	"io/ioutil"
 	"os"
 	"path/filepath"
-	"runtime"
 	"strings"

 	"github.com/otiai10/copy"
@@ -33,7 +32,6 @@ const (
 	agentName       = "elastic-agent"
 	hashLen         = 6
 	agentCommitFile = ".elastic-agent.active.commit"
-	darwin          = "darwin"
 )

 var (
@@ -161,11 +159,6 @@ func (u *Upgrader) Upgrade(ctx context.Context, a Action, reexecNow bool) (_ ree
 		return nil, nil
 	}

-	// Copy vault directory for linux/windows only
-	if err := copyVault(newHash); err != nil {
-		return nil, errors.New(err, "failed to copy vault")
-	}
-
 	if err := copyActionStore(newHash); err != nil {
 		return nil, errors.New(err, "failed to copy action store")
 	}
@@ -300,36 +293,6 @@ func copyActionStore(newHash string) error {
 	return nil
 }

-func getVaultPath(newHash string) string {
-	vaultPath := paths.AgentVaultPath()
-	if runtime.GOOS == darwin {
-		return vaultPath
-	}
-	newHome := filepath.Join(filepath.Dir(paths.Home()), fmt.Sprintf("%s-%s", agentName, newHash))
-	return filepath.Join(newHome, filepath.Base(vaultPath))
-}
-
-// Copies the vault files for windows and linux
-func copyVault(newHash string) error {
-	// No vault files to copy on darwin
-	if runtime.GOOS == darwin {
-		return nil
-	}
-
-	vaultPath := paths.AgentVaultPath()
-	newVaultPath := getVaultPath(newHash)
-
-	err := copyDir(vaultPath, newVaultPath)
-	if err != nil {
-		if os.IsNotExist(err) {
-			return nil
-		}
-		return err
-	}
-
-	return nil
-}
-
 // shutdownCallback returns a callback function to be executed during shutdown once all processes are closed.
 // this goes through the runtime directory of the agent and copies all the state files created by processes to the new versioned
 // home directory with updated process name to match the new version.
diff --git a/internal/pkg/agent/cmd/run.go b/internal/pkg/agent/cmd/run.go
index ad508af9086..732831d87d8 100644
--- a/internal/pkg/agent/cmd/run.go
+++ b/internal/pkg/agent/cmd/run.go
@@ -37,6 +37,7 @@ import (
 	"github.com/elastic/elastic-agent/internal/pkg/agent/configuration"
 	"github.com/elastic/elastic-agent/internal/pkg/agent/control/server"
 	"github.com/elastic/elastic-agent/internal/pkg/agent/errors"
+	"github.com/elastic/elastic-agent/internal/pkg/agent/migration"
 	"github.com/elastic/elastic-agent/internal/pkg/agent/storage"
 	"github.com/elastic/elastic-agent/internal/pkg/cli"
 	"github.com/elastic/elastic-agent/internal/pkg/config"
@@ -121,6 +122,22 @@ func run(override cfgOverrider) error {
 		createAgentID = false
 	}

+	// This is specific for the agent upgrade from 8.3.0 - 8.3.2 to 8.x and above on Linux and Windows platforms.
+	// Addresses the issue: https://github.com/elastic/elastic-agent/issues/682
+	// The vault directory was located in the hash versioned "Home" directory of the agent.
+	// This moves the vault directory two levels up into the "Config" directory next to the fleet.enc file
+	// in order to be able to "upgrade" the agent from deb/rpm that is not invoking the upgrade handler and
+	// doesn't perform the migration of the state or vault.
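+	// (e.g. the old data/elastic-agent-<hash>/vault directory is now expected at <config dir>/vault, next to fleet.enc)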
+	// If the agent secret doesn't exist, then search for the newest agent secret in the agent data directories
+	// and migrate it into the new vault location.
+	err = migration.MigrateAgentSecret(logger)
+	logger.Debugf("migration of agent secret completed, err: %v", err)
+	if err != nil {
+		err = errors.New(err, "failed to perform the agent secret migration")
+		logger.Error(err)
+		return err
+	}
+
 	// Ensure we have the agent secret created.
 	// The secret is not created here if it exists already from the previous enrollment.
 	// This is needed for compatibility with agent running in standalone mode,
diff --git a/internal/pkg/agent/migration/migrate_secret.go b/internal/pkg/agent/migration/migrate_secret.go
new file mode 100644
index 00000000000..08cfc3e5eb1
--- /dev/null
+++ b/internal/pkg/agent/migration/migrate_secret.go
@@ -0,0 +1,163 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package migration
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"io/fs"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"runtime"
+	"strings"
+
+	"github.com/elastic/elastic-agent-libs/logp"
+	"github.com/elastic/elastic-agent/internal/pkg/agent/application/paths"
+	"github.com/elastic/elastic-agent/internal/pkg/agent/application/secret"
+	"github.com/elastic/elastic-agent/internal/pkg/agent/storage"
+	"github.com/elastic/elastic-agent/internal/pkg/fileutil"
+)
+
+const (
+	darwin = "darwin"
+)
+
+// MigrateAgentSecret migrates the agent secret if it doesn't exist yet, covering the agent
+// upgrade from 8.3.0 - 8.3.2 to 8.x and above on Linux and Windows platforms.
+func MigrateAgentSecret(log *logp.Logger) error {
+	// Nothing to migrate for darwin
+	if runtime.GOOS == darwin {
+		return nil
+	}
+
+	// Check if the secret already exists
+	log.Debug("migrate agent secret, check if secret already exists")
+	_, err := secret.GetAgentSecret()
+	if err != nil {
+		if errors.Is(err, fs.ErrNotExist) {
+			// The secret doesn't exist, perform migration below
+			log.Debug("agent secret doesn't exist, perform migration")
+		} else {
+			err = fmt.Errorf("failed to read the agent secret: %w", err)
+			log.Error(err)
+			return err
+		}
+	} else {
+		// The secret already exists, nothing to migrate
+		log.Debug("secret already exists, nothing to migrate")
+		return nil
+	}
+
+	// Check if the secret was copied by the fleet upgrade handler to the legacy location
+	log.Debug("check if secret was copied over by 8.3.0-8.3.2 version of the agent")
+	sec, err := getAgentSecretFromHomePath(paths.Home())
+	if err != nil {
+		if errors.Is(err, fs.ErrNotExist) {
+			// The secret is not found in this instance of the vault, continue with migration
+			log.Debug("agent secret copied from 8.3.0-8.3.2 doesn't exist, continue with migration")
+		} else {
+			err = fmt.Errorf("failed agent 8.3.0-8.3.2 secret check: %w", err)
+			log.Error(err)
+			return err
+		}
+	} else {
+		// The secret is found, save it in the new agent vault
+		log.Debug("agent secret from 8.3.0-8.3.2 is found, migrate to the new vault")
+		return secret.SetAgentSecret(sec)
+	}
+
+	// Scan other agent data directories, find the latest agent secret
+	log.Debug("search for possible latest agent 8.3.0-8.3.2 secret")
+	dataDir := paths.Data()
+
+	sec, err = findPreviousAgentSecret(dataDir)
+	if err != nil {
+		if errors.Is(err, fs.ErrNotExist) {
+			// The secret is not found
+			log.Debug("no previous
agent 8.3.0-8.3.2 secrets found, nothing to migrate") + return nil + } + err = fmt.Errorf("search for possible latest agent 8.3.0-8.3.2 secret failed: %w", err) + log.Error(err) + return err + } + log.Debug("found previous agent 8.3.0-8.3.2 secret, migrate to the new vault") + return secret.SetAgentSecret(sec) +} + +func findPreviousAgentSecret(dataDir string) (secret.Secret, error) { + found := false + var sec secret.Secret + fileSystem := os.DirFS(dataDir) + _ = fs.WalkDir(fileSystem, ".", func(path string, d fs.DirEntry, err error) error { + if err != nil { + return err + } + if d.IsDir() { + if strings.HasPrefix(d.Name(), "elastic-agent-") { + vaultPath := getLegacyVaultPathFromPath(filepath.Join(dataDir, path)) + s, err := secret.GetAgentSecret(secret.WithVaultPath(vaultPath)) + if err != nil { + // Ignore if fs.ErrNotExist error, keep scanning + if errors.Is(err, fs.ErrNotExist) { + return nil + } + return err + } + + // Check that the configuration can be decrypted with the found agent secret + exists, _ := fileutil.FileExists(paths.AgentConfigFile()) + if exists { + store := storage.NewEncryptedDiskStore(paths.AgentConfigFile(), storage.WithVaultPath(vaultPath)) + r, err := store.Load() + if err != nil { + //nolint:nilerr // ignore the error keep scanning + return nil + } + + defer r.Close() + _, err = ioutil.ReadAll(r) + if err != nil { + //nolint:nilerr // ignore the error keep scanning + return nil + } + + sec = s + found = true + return io.EOF + } + } else if d.Name() != "." { + return fs.SkipDir + } + } + return nil + }) + if !found { + return sec, fs.ErrNotExist + } + return sec, nil +} + +func getAgentSecretFromHomePath(homePath string) (sec secret.Secret, err error) { + vaultPath := getLegacyVaultPathFromPath(homePath) + fi, err := os.Stat(vaultPath) + if err != nil { + return + } + + if !fi.IsDir() { + return sec, fs.ErrNotExist + } + return secret.GetAgentSecret(secret.WithVaultPath(vaultPath)) +} + +func getLegacyVaultPath() string { + return getLegacyVaultPathFromPath(paths.Home()) +} + +func getLegacyVaultPathFromPath(path string) string { + return filepath.Join(path, "vault") +} diff --git a/internal/pkg/agent/migration/migrate_secret_test.go b/internal/pkg/agent/migration/migrate_secret_test.go new file mode 100644 index 00000000000..562549c6db8 --- /dev/null +++ b/internal/pkg/agent/migration/migrate_secret_test.go @@ -0,0 +1,386 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
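+
+// The tests below cover the migration paths sketched in migrate_secret.go: the legacy
+// home-path vault lookup, the newest-secret scan across agent data directories, and the
+// encrypted-config match check.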
+ +//go:build linux || windows +// +build linux windows + +package migration + +import ( + "errors" + "io/fs" + "io/ioutil" + "os" + "path/filepath" + "strings" + "testing" + "time" + + "github.com/elastic/elastic-agent-libs/logp" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/paths" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/secret" + "github.com/elastic/elastic-agent/internal/pkg/agent/storage" + "github.com/elastic/elastic-agent/internal/pkg/agent/vault" + "github.com/gofrs/uuid" + "github.com/google/go-cmp/cmp" +) + +func TestFindAgentSecretFromHomePath(t *testing.T) { + + tests := []struct { + name string + setupFn func(homePath string) error + wantErr error + }{ + { + name: "no data dir", + wantErr: fs.ErrNotExist, + }, + { + name: "no vault dir", + setupFn: func(homePath string) error { + return os.MkdirAll(homePath, 0750) + }, + wantErr: fs.ErrNotExist, + }, + { + name: "vault file instead of directory", + setupFn: func(homePath string) error { + err := os.MkdirAll(homePath, 0750) + if err != nil { + return err + } + return ioutil.WriteFile(getLegacyVaultPathFromPath(homePath), []byte{}, 0600) + }, + wantErr: fs.ErrNotExist, + }, + { + name: "empty vault directory", + setupFn: func(homePath string) error { + return os.MkdirAll(getLegacyVaultPathFromPath(homePath), 0750) + }, + wantErr: fs.ErrNotExist, + }, + { + name: "empty vault", + setupFn: func(homePath string) error { + v, err := vault.New(getLegacyVaultPathFromPath(homePath)) + if err != nil { + return err + } + defer v.Close() + return nil + }, + wantErr: fs.ErrNotExist, + }, + { + name: "vault dir with no seed", + setupFn: func(homePath string) error { + vaultPath := getLegacyVaultPathFromPath(homePath) + v, err := vault.New(vaultPath) + if err != nil { + return err + } + defer v.Close() + return os.Remove(filepath.Join(vaultPath, ".seed")) + }, + wantErr: fs.ErrNotExist, + }, + { + name: "vault with secret and misplaced seed vault", + setupFn: func(homePath string) error { + vaultPath := getLegacyVaultPathFromPath(homePath) + err := secret.CreateAgentSecret(secret.WithVaultPath(vaultPath)) + if err != nil { + return err + } + return os.Remove(filepath.Join(vaultPath, ".seed")) + }, + wantErr: fs.ErrNotExist, + }, + { + name: "vault with valid secret", + setupFn: func(homePath string) error { + vaultPath := getLegacyVaultPathFromPath(homePath) + err := secret.CreateAgentSecret(secret.WithVaultPath(vaultPath)) + if err != nil { + return err + } + return generateTestConfig(vaultPath) + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + top := t.TempDir() + paths.SetTop(top) + homePath := paths.Home() + + if tc.setupFn != nil { + if err := tc.setupFn(homePath); err != nil { + t.Fatal(err) + } + } + + sec, err := getAgentSecretFromHomePath(homePath) + if !errors.Is(err, tc.wantErr) { + t.Fatalf("want err: %v, got err: %v", tc.wantErr, err) + } + + foundSec, err := findPreviousAgentSecret(filepath.Dir(homePath)) + if !errors.Is(err, tc.wantErr) { + t.Fatalf("want err: %v, got err: %v", tc.wantErr, err) + } + diff := cmp.Diff(sec, foundSec) + if diff != "" { + t.Fatal(diff) + } + + }) + } +} + +type configType int + +const ( + NoConfig configType = iota + MatchingConfig + NonMatchingConfig +) + +func TestFindNewestAgentSecret(t *testing.T) { + + tests := []struct { + name string + cfgType configType + wantErr error + }{ + { + name: "missing config", + cfgType: NoConfig, + wantErr: fs.ErrNotExist, + 
}, + { + name: "matching config", + cfgType: MatchingConfig, + }, + { + name: "non-matching config", + cfgType: NonMatchingConfig, + wantErr: fs.ErrNotExist, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + top := t.TempDir() + paths.SetTop(top) + paths.SetConfig(top) + dataDir := paths.Data() + + wantSecret, err := generateTestSecrets(dataDir, 3, tc.cfgType) + if err != nil { + t.Fatal(err) + } + sec, err := findPreviousAgentSecret(dataDir) + + if !errors.Is(err, tc.wantErr) { + t.Fatalf("want err: %v, got err: %v", tc.wantErr, err) + } + diff := cmp.Diff(sec, wantSecret) + if diff != "" { + t.Fatal(diff) + } + }) + } +} + +func TestMigrateAgentSecret(t *testing.T) { + top := t.TempDir() + paths.SetTop(top) + paths.SetConfig(top) + dataDir := paths.Data() + + // No vault home path + homePath := generateTestHomePath(dataDir) + if err := os.MkdirAll(homePath, 0750); err != nil { + t.Fatal(err) + } + + // Empty vault home path + homePath = generateTestHomePath(dataDir) + vaultPath := getLegacyVaultPathFromPath(homePath) + if err := os.MkdirAll(vaultPath, 0750); err != nil { + t.Fatal(err) + } + + // Vault with missing seed + homePath = generateTestHomePath(dataDir) + vaultPath = getLegacyVaultPathFromPath(homePath) + v, err := vault.New(vaultPath) + if err != nil { + t.Fatal(err) + } + defer v.Close() + + if err = os.Remove(filepath.Join(vaultPath, ".seed")); err != nil { + t.Fatal(err) + } + + // Generate few valid secrets to scan for + wantSecret, err := generateTestSecrets(dataDir, 5, MatchingConfig) + if err != nil { + t.Fatal(err) + } + + // Expect no agent secret found + _, err = secret.GetAgentSecret(secret.WithVaultPath(paths.AgentVaultPath())) + if !errors.Is(err, fs.ErrNotExist) { + t.Fatalf("expected err: %v", fs.ErrNotExist) + } + + // Perform migration + log := logp.NewLogger("test_agent_secret") + err = MigrateAgentSecret(log) + if err != nil { + t.Fatal(err) + } + + // Expect the agent secret is migrated now + sec, err := secret.GetAgentSecret(secret.WithVaultPath(paths.AgentVaultPath())) + if err != nil { + t.Fatal(err) + } + + // Compare the migrated secret with the expected newest one + diff := cmp.Diff(sec, wantSecret) + if diff != "" { + t.Fatal(diff) + } +} + +func TestMigrateAgentSecretAlreadyExists(t *testing.T) { + top := t.TempDir() + paths.SetTop(top) + err := secret.CreateAgentSecret(secret.WithVaultPath(paths.AgentVaultPath())) + if err != nil { + t.Fatal(err) + } + + // Expect agent secret created + wantSecret, err := secret.GetAgentSecret(secret.WithVaultPath(paths.AgentVaultPath())) + if err != nil { + t.Fatal(err) + } + + // Perform migration + log := logp.NewLogger("test_agent_secret") + err = MigrateAgentSecret(log) + if err != nil { + t.Fatal(err) + } + + sec, err := secret.GetAgentSecret(secret.WithVaultPath(paths.AgentVaultPath())) + if err != nil { + t.Fatal(err) + } + + // Compare, should be the same secret + diff := cmp.Diff(sec, wantSecret) + if diff != "" { + t.Fatal(diff) + } +} + +func TestMigrateAgentSecretFromLegacyLocation(t *testing.T) { + top := t.TempDir() + paths.SetTop(top) + paths.SetConfig(top) + vaultPath := getLegacyVaultPath() + err := secret.CreateAgentSecret(secret.WithVaultPath(vaultPath)) + if err != nil { + t.Fatal(err) + } + + // Expect agent secret created + wantSecret, err := secret.GetAgentSecret(secret.WithVaultPath(vaultPath)) + if err != nil { + t.Fatal(err) + } + + // Perform migration + log := logp.NewLogger("test_agent_secret") + err = MigrateAgentSecret(log) + if err != nil { + 
		t.Fatal(err)
	}

	sec, err := secret.GetAgentSecret(secret.WithVaultPath(paths.AgentVaultPath()))
	if err != nil {
		t.Fatal(err)
	}

	// Compare, should be the same secret
	diff := cmp.Diff(sec, wantSecret)
	if diff != "" {
		t.Fatal(diff)
	}
}

func generateTestHomePath(dataDir string) string {
	suffix := uuid.Must(uuid.NewV4()).String()[:6]
	return filepath.Join(dataDir, "elastic-agent-"+suffix)
}

func generateTestConfig(vaultPath string) error {
	fleetEncConfigFile := paths.AgentConfigFile()
	store := storage.NewEncryptedDiskStore(fleetEncConfigFile, storage.WithVaultPath(vaultPath))
	return store.Save(strings.NewReader("foo"))
}

func generateTestSecrets(dataDir string, count int, cfgType configType) (wantSecret secret.Secret, err error) {
	now := time.Now()

	// Generate multiple home paths
	for i := 0; i < count; i++ {
		homePath := generateTestHomePath(dataDir)
		k, err := vault.NewKey(vault.AES256)
		if err != nil {
			return wantSecret, err
		}

		sec := secret.Secret{
			Value:     k,
			CreatedOn: now.Add(-time.Duration(i+1) * time.Minute),
		}

		vaultPath := getLegacyVaultPathFromPath(homePath)
		err = secret.SetAgentSecret(sec, secret.WithVaultPath(vaultPath))
		if err != nil {
			return wantSecret, err
		}

		switch cfgType {
		case NoConfig:
		case MatchingConfig, NonMatchingConfig:
			if i == 0 {
				wantSecret = sec
				// Create a matching encrypted config file; the content of the file doesn't matter for this test
				err = generateTestConfig(vaultPath)
				if err != nil {
					return wantSecret, err
				}
			}
		}
		// Delete the vault in the non-matching config case, leaving a config that no remaining secret can decrypt
		if cfgType == NonMatchingConfig && i == 0 {
			_ = os.RemoveAll(vaultPath)
			wantSecret = secret.Secret{}
		}
	}

	return wantSecret, nil
}
diff --git a/internal/pkg/agent/vault/seed.go b/internal/pkg/agent/vault/seed.go
index 698bd0f0135..773c42e7465 100644
--- a/internal/pkg/agent/vault/seed.go
+++ b/internal/pkg/agent/vault/seed.go
@@ -9,6 +9,8 @@ package vault

 import (
 	"errors"
+	"fmt"
+	"io/fs"
 	"io/ioutil"
 	"os"
 	"path/filepath"
@@ -29,6 +31,24 @@ func getSeed(path string) ([]byte, error) {
 	mxSeed.Lock()
 	defer mxSeed.Unlock()

+	b, err := ioutil.ReadFile(fp)
+	if err != nil {
+		return nil, fmt.Errorf("could not read seed file: %w", err)
+	}
+
+	// return fs.ErrNotExist if an invalid number of bytes is read back
+	if len(b) != int(AES256) {
+		return nil, fmt.Errorf("invalid seed length, expected: %v, got: %v: %w", int(AES256), len(b), fs.ErrNotExist)
+	}
+	return b, nil
+}
+
+func createSeedIfNotExists(path string) ([]byte, error) {
+	fp := filepath.Join(path, seedFile)
+
+	mxSeed.Lock()
+	defer mxSeed.Unlock()
+
 	b, err := ioutil.ReadFile(fp)
 	if err != nil {
 		if !errors.Is(err, os.ErrNotExist) {
@@ -52,3 +72,10 @@ func getSeed(path string) ([]byte, error) {

 	return seed, nil
 }
+
+func getOrCreateSeed(path string, readonly bool) ([]byte, error) {
+	if readonly {
+		return getSeed(path)
+	}
+	return createSeedIfNotExists(path)
+}
diff --git a/internal/pkg/agent/vault/seed_test.go b/internal/pkg/agent/vault/seed_test.go
index bb9197ea614..d10be29634f 100644
--- a/internal/pkg/agent/vault/seed_test.go
+++ b/internal/pkg/agent/vault/seed_test.go
@@ -10,12 +10,14 @@ package vault

 import (
 	"context"
 	"encoding/hex"
+	"io/fs"
 	"path/filepath"
 	"sync"
 	"testing"

 	"github.com/google/go-cmp/cmp"
 	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
 	"golang.org/x/sync/errgroup"
 )

@@ -24,12 +26,45 @@ func TestGetSeed(t *testing.T) {

 	fp := filepath.Join(dir, seedFile)

+
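	// a fresh temp dir must not contain a seed file yet
+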
require.NoFileExists(t, fp) + + // seed is not yet created + _, err := getSeed(dir) + + // should be not found + require.ErrorIs(t, err, fs.ErrNotExist) + + b, err := createSeedIfNotExists(dir) + assert.NoError(t, err) + + require.FileExists(t, fp) + + diff := cmp.Diff(int(AES256), len(b)) + if diff != "" { + t.Error(diff) + } + + // try get seed + gotSeed, err := getSeed(dir) + assert.NoError(t, err) + + diff = cmp.Diff(b, gotSeed) + if diff != "" { + t.Error(diff) + } +} + +func TestCreateSeedIfNotExists(t *testing.T) { + dir := t.TempDir() + + fp := filepath.Join(dir, seedFile) + assert.NoFileExists(t, fp) - b, err := getSeed(dir) + b, err := createSeedIfNotExists(dir) assert.NoError(t, err) - assert.FileExists(t, fp) + require.FileExists(t, fp) diff := cmp.Diff(int(AES256), len(b)) if diff != "" { @@ -37,7 +72,7 @@ func TestGetSeed(t *testing.T) { } } -func TestGetSeedRace(t *testing.T) { +func TestCreateSeedIfNotExistsRace(t *testing.T) { var err error dir := t.TempDir() @@ -51,7 +86,7 @@ func TestGetSeedRace(t *testing.T) { for i := 0; i < count; i++ { g.Go(func(idx int) func() error { return func() error { - seed, err := getSeed(dir) + seed, err := createSeedIfNotExists(dir) mx.Lock() res[idx] = seed mx.Unlock() diff --git a/internal/pkg/agent/vault/vault_darwin.go b/internal/pkg/agent/vault/vault_darwin.go index 4119b27a586..bfcb636da6f 100644 --- a/internal/pkg/agent/vault/vault_darwin.go +++ b/internal/pkg/agent/vault/vault_darwin.go @@ -38,7 +38,7 @@ type Vault struct { // New initializes the vault store // Call Close when done to release the resouces -func New(name string) (*Vault, error) { +func New(name string, opts ...OptionFunc) (*Vault, error) { var keychain C.SecKeychainRef err := statusToError(C.OpenKeychain(keychain)) if err != nil { diff --git a/internal/pkg/agent/vault/vault_linux.go b/internal/pkg/agent/vault/vault_linux.go index a3737d5c625..23d0a0e859f 100644 --- a/internal/pkg/agent/vault/vault_linux.go +++ b/internal/pkg/agent/vault/vault_linux.go @@ -11,6 +11,7 @@ import ( "crypto/rand" "crypto/sha256" "errors" + "fmt" "io/fs" "io/ioutil" "os" @@ -29,8 +30,9 @@ type Vault struct { mx sync.Mutex } -// Open initializes the vault store -func New(path string) (*Vault, error) { +// New creates the vault store +func New(path string, opts ...OptionFunc) (v *Vault, err error) { + options := applyOptions(opts...) dir := filepath.Dir(path) // If there is no specific path then get the executable directory @@ -43,12 +45,22 @@ func New(path string) (*Vault, error) { path = filepath.Join(dir, path) } - err := os.MkdirAll(path, 0750) - if err != nil { - return nil, err + if options.readonly { + fi, err := os.Stat(path) + if err != nil { + return nil, err + } + if !fi.IsDir() { + return nil, fs.ErrNotExist + } + } else { + err := os.MkdirAll(path, 0750) + if err != nil { + return nil, fmt.Errorf("failed to create vault path: %v, err: %w", path, err) + } } - key, err := getSeed(path) + key, err := getOrCreateSeed(path, options.readonly) if err != nil { return nil, err } diff --git a/internal/pkg/agent/vault/vault_options.go b/internal/pkg/agent/vault/vault_options.go new file mode 100644 index 00000000000..2673ae6aa53 --- /dev/null +++ b/internal/pkg/agent/vault/vault_options.go @@ -0,0 +1,28 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+ +package vault + +type Options struct { + readonly bool +} + +type OptionFunc func(o *Options) + +func WithReadonly(readonly bool) OptionFunc { + return func(o *Options) { + o.readonly = readonly + } +} + +//nolint:unused // not used on darwin +func applyOptions(opts ...OptionFunc) Options { + var options Options + + for _, opt := range opts { + opt(&options) + } + + return options +} diff --git a/internal/pkg/agent/vault/vault_windows.go b/internal/pkg/agent/vault/vault_windows.go index 7468fe16814..c39769cc8da 100644 --- a/internal/pkg/agent/vault/vault_windows.go +++ b/internal/pkg/agent/vault/vault_windows.go @@ -27,7 +27,8 @@ type Vault struct { } // Open initializes the vault store -func New(path string) (*Vault, error) { +func New(path string, opts ...OptionFunc) (v *Vault, err error) { + options := applyOptions(opts...) dir := filepath.Dir(path) // If there is no specific path then get the executable directory @@ -40,16 +41,26 @@ func New(path string) (*Vault, error) { path = filepath.Join(dir, path) } - err := os.MkdirAll(path, 0750) - if err != nil { - return nil, err - } - err = systemAdministratorsOnly(path, false) - if err != nil { - return nil, err + if options.readonly { + fi, err := os.Stat(path) + if err != nil { + return nil, err + } + if !fi.IsDir() { + return nil, fs.ErrNotExist + } + } else { + err := os.MkdirAll(path, 0750) + if err != nil { + return nil, err + } + err = systemAdministratorsOnly(path, false) + if err != nil { + return nil, err + } } - entropy, err := getSeed(path) + entropy, err := getOrCreateSeed(path, options.readonly) if err != nil { return nil, err } From 394033dbd9cc2105be37fc1bb971d7ba3449fc3d Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Wed, 20 Jul 2022 02:12:00 -0400 Subject: [PATCH 053/180] [Automation] Update elastic stack version to 8.4.0-31269fd2 for testing (#746) Co-authored-by: apmmachine --- testing/environments/snapshot.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/testing/environments/snapshot.yml b/testing/environments/snapshot.yml index 43b309fb98e..9e2fd843b08 100644 --- a/testing/environments/snapshot.yml +++ b/testing/environments/snapshot.yml @@ -3,7 +3,7 @@ version: '2.3' services: elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:8.4.0-31315ca3-SNAPSHOT + image: docker.elastic.co/elasticsearch/elasticsearch:8.4.0-31269fd2-SNAPSHOT # When extend is used it merges healthcheck.tests, see: # https://github.com/docker/compose/issues/8962 # healthcheck: @@ -42,7 +42,7 @@ services: - ./docker/logstash/pki:/etc/pki:ro kibana: - image: docker.elastic.co/kibana/kibana:8.4.0-31315ca3-SNAPSHOT + image: docker.elastic.co/kibana/kibana:8.4.0-31269fd2-SNAPSHOT environment: - "ELASTICSEARCH_USERNAME=kibana_system_user" - "ELASTICSEARCH_PASSWORD=testing" From 97e6597a24c0f16e530fdbf86c82953374f28549 Mon Sep 17 00:00:00 2001 From: Anderson Queiroz Date: Wed, 20 Jul 2022 11:29:07 +0200 Subject: [PATCH 054/180] wrap errors and fix some docs typo and convention (#743) --- internal/pkg/agent/application/info/agent_id.go | 16 ++++++++-------- .../pkg/agent/application/info/agent_metadata.go | 7 ++++--- .../pkg/agent/storage/encrypted_disk_store.go | 3 ++- internal/pkg/agent/vault/vault_darwin.go | 6 ++++-- internal/pkg/agent/vault/vault_linux.go | 4 ++-- 5 files changed, 20 insertions(+), 16 deletions(-) diff --git a/internal/pkg/agent/application/info/agent_id.go b/internal/pkg/agent/application/info/agent_id.go index 
e376a0fbfb4..2d5fbfc76e1 100644 --- a/internal/pkg/agent/application/info/agent_id.go +++ b/internal/pkg/agent/application/info/agent_id.go @@ -71,7 +71,7 @@ func getInfoFromStore(s ioStore, logLevel string) (*persistentAgentInfo, error) agentConfigFile := paths.AgentConfigFile() reader, err := s.Load() if err != nil { - return nil, err + return nil, fmt.Errorf("failed to load from ioStore: %w", err) } // reader is closed by this function @@ -195,20 +195,20 @@ func loadAgentInfo(forceUpdate bool, logLevel string, createAgentID bool) (*pers agentConfigFile := paths.AgentConfigFile() diskStore := storage.NewEncryptedDiskStore(agentConfigFile) - agentinfo, err := getInfoFromStore(diskStore, logLevel) + agentInfo, err := getInfoFromStore(diskStore, logLevel) if err != nil { - return nil, err + return nil, fmt.Errorf("could not get agent info from store: %w", err) } - if agentinfo != nil && !forceUpdate && (agentinfo.ID != "" || !createAgentID) { - return agentinfo, nil + if agentInfo != nil && !forceUpdate && (agentInfo.ID != "" || !createAgentID) { + return agentInfo, nil } - if err := updateID(agentinfo, diskStore); err != nil { - return nil, err + if err := updateID(agentInfo, diskStore); err != nil { + return nil, fmt.Errorf("could not update agent ID on disk store: %w", err) } - return agentinfo, nil + return agentInfo, nil } func updateID(agentInfo *persistentAgentInfo, s ioStore) error { diff --git a/internal/pkg/agent/application/info/agent_metadata.go b/internal/pkg/agent/application/info/agent_metadata.go index a532487a446..49afeca9dc7 100644 --- a/internal/pkg/agent/application/info/agent_metadata.go +++ b/internal/pkg/agent/application/info/agent_metadata.go @@ -10,10 +10,11 @@ import ( "runtime" "strings" - "github.com/elastic/elastic-agent/internal/pkg/agent/errors" - "github.com/elastic/elastic-agent/internal/pkg/release" "github.com/elastic/go-sysinfo" "github.com/elastic/go-sysinfo/types" + + "github.com/elastic/elastic-agent/internal/pkg/agent/errors" + "github.com/elastic/elastic-agent/internal/pkg/release" ) // ECSMeta is a collection of agent related metadata in ECS compliant object form. 
@@ -123,7 +124,7 @@ const ( func Metadata() (*ECSMeta, error) { agentInfo, err := NewAgentInfo(false) if err != nil { - return nil, err + return nil, fmt.Errorf("failed to create new agent info: %w", err) } meta, err := agentInfo.ECSMetadata() diff --git a/internal/pkg/agent/storage/encrypted_disk_store.go b/internal/pkg/agent/storage/encrypted_disk_store.go index 7fe2f70339a..e3ea3c3f6cf 100644 --- a/internal/pkg/agent/storage/encrypted_disk_store.go +++ b/internal/pkg/agent/storage/encrypted_disk_store.go @@ -15,6 +15,7 @@ import ( "github.com/hectane/go-acl" "github.com/elastic/elastic-agent-libs/file" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/paths" "github.com/elastic/elastic-agent/internal/pkg/agent/application/secret" "github.com/elastic/elastic-agent/internal/pkg/agent/errors" @@ -78,7 +79,7 @@ func (d *EncryptedDiskStore) ensureKey() error { if d.key == nil { key, err := secret.GetAgentSecret(secret.WithVaultPath(d.vaultPath)) if err != nil { - return err + return fmt.Errorf("could not get agent key: %w", err) } d.key = key.Value } diff --git a/internal/pkg/agent/vault/vault_darwin.go b/internal/pkg/agent/vault/vault_darwin.go index bfcb636da6f..5f63a496179 100644 --- a/internal/pkg/agent/vault/vault_darwin.go +++ b/internal/pkg/agent/vault/vault_darwin.go @@ -37,13 +37,15 @@ type Vault struct { } // New initializes the vault store -// Call Close when done to release the resouces +// Call Close when done to release the resources func New(name string, opts ...OptionFunc) (*Vault, error) { var keychain C.SecKeychainRef + err := statusToError(C.OpenKeychain(keychain)) if err != nil { - return nil, err + return nil, fmt.Errorf("could not open keychain: %w", err) } + return &Vault{ name: name, keychain: keychain, diff --git a/internal/pkg/agent/vault/vault_linux.go b/internal/pkg/agent/vault/vault_linux.go index 23d0a0e859f..51f6a3fa651 100644 --- a/internal/pkg/agent/vault/vault_linux.go +++ b/internal/pkg/agent/vault/vault_linux.go @@ -39,7 +39,7 @@ func New(path string, opts ...OptionFunc) (v *Vault, err error) { if dir == "." 
{
		exefp, err := os.Executable()
		if err != nil {
-			return nil, fmt.Errorf("could not get executable path: %w", err)
+			return nil, fmt.Errorf("could not get executable path: %w", err)
		}
		dir = filepath.Dir(exefp)
		path = filepath.Join(dir, path)
@@ -62,7 +62,7 @@ func New(path string, opts ...OptionFunc) (v *Vault, err error) {

 	key, err := getOrCreateSeed(path, options.readonly)
 	if err != nil {
-		return nil, err
+		return nil, fmt.Errorf("could not get seed to create new vault: %w", err)
 	}

 	return &Vault{

From b0e9623f33c84f1e19515a4306e3b9d37a560a85 Mon Sep 17 00:00:00 2001
From: Victor Martinez
Date: Wed, 20 Jul 2022 11:56:52 +0100
Subject: [PATCH 055/180] automate the ironbank docker context generation
 (#679)

---
 .ci/Jenkinsfile                               |   2 +-
 .gitignore                                    |   1 -
 dev-tools/packaging/files/ironbank/LICENSE    | 280 ++++++++++++++++++
 .../files/ironbank/config/docker-entrypoint   |  11 +
 .../templates/ironbank/Dockerfile.tmpl        |  90 ++++++
 .../templates/ironbank/README.md.tmpl         |  43 +++
 .../ironbank/hardening_manifest.yaml.tmpl     |  68 +++++
 magefile.go                                   | 108 +++++++
 8 files changed, 601 insertions(+), 2 deletions(-)
 create mode 100644 dev-tools/packaging/files/ironbank/LICENSE
 create mode 100644 dev-tools/packaging/files/ironbank/config/docker-entrypoint
 create mode 100644 dev-tools/packaging/templates/ironbank/Dockerfile.tmpl
 create mode 100644 dev-tools/packaging/templates/ironbank/README.md.tmpl
 create mode 100644 dev-tools/packaging/templates/ironbank/hardening_manifest.yaml.tmpl

diff --git a/.ci/Jenkinsfile b/.ci/Jenkinsfile
index 34a72100707..9d24c6ea810 100644
--- a/.ci/Jenkinsfile
+++ b/.ci/Jenkinsfile
@@ -175,7 +175,7 @@ pipeline {
               withMageEnv(){
                 dir("${BASE_DIR}"){
                   withPackageEnv("${PLATFORM}") {
-                    cmd(label: 'Go package', script: 'mage package')
+                    cmd(label: 'Go package', script: 'mage package ironbank')
                     uploadPackagesToGoogleBucket(
                       credentialsId: env.JOB_GCS_EXT_CREDENTIALS,
                       repo: env.REPO,
diff --git a/.gitignore b/.gitignore
index 89eaa67db73..939765242f5 100644
--- a/.gitignore
+++ b/.gitignore
@@ -62,4 +62,3 @@ internal/pkg/agent/transpiler/tests/exec-1.0-darwin-x86_64/exec

 # VSCode
 /.vscode
-
diff --git a/dev-tools/packaging/files/ironbank/LICENSE b/dev-tools/packaging/files/ironbank/LICENSE
new file mode 100644
index 00000000000..ef2739c152e
--- /dev/null
+++ b/dev-tools/packaging/files/ironbank/LICENSE
@@ -0,0 +1,280 @@
+ELASTIC LICENSE AGREEMENT
+
+PLEASE READ CAREFULLY THIS ELASTIC LICENSE AGREEMENT (THIS "AGREEMENT"), WHICH
+CONSTITUTES A LEGALLY BINDING AGREEMENT AND GOVERNS ALL OF YOUR USE OF ALL OF
+THE ELASTIC SOFTWARE WITH WHICH THIS AGREEMENT IS INCLUDED ("ELASTIC SOFTWARE")
+THAT IS PROVIDED IN OBJECT CODE FORMAT, AND, IN ACCORDANCE WITH SECTION 2 BELOW,
+CERTAIN OF THE ELASTIC SOFTWARE THAT IS PROVIDED IN SOURCE CODE FORMAT. BY
+INSTALLING OR USING ANY OF THE ELASTIC SOFTWARE GOVERNED BY THIS AGREEMENT, YOU
+ARE ASSENTING TO THE TERMS AND CONDITIONS OF THIS AGREEMENT. IF YOU DO NOT AGREE
+WITH SUCH TERMS AND CONDITIONS, YOU MAY NOT INSTALL OR USE THE ELASTIC SOFTWARE
+GOVERNED BY THIS AGREEMENT. IF YOU ARE INSTALLING OR USING THE SOFTWARE ON
+BEHALF OF A LEGAL ENTITY, YOU REPRESENT AND WARRANT THAT YOU HAVE THE ACTUAL
+AUTHORITY TO AGREE TO THE TERMS AND CONDITIONS OF THIS AGREEMENT ON BEHALF OF
+SUCH ENTITY.
+
+Posted Date: April 20, 2018
+
+This Agreement is entered into by and between Elasticsearch BV ("Elastic") and
+You, or the legal entity on behalf of whom You are acting (as applicable,
+"You").
+
+1. OBJECT CODE END USER LICENSES, RESTRICTIONS AND THIRD PARTY OPEN SOURCE
+SOFTWARE
+
+  1.1 Object Code End User License.
Subject to the terms and conditions of + Section 1.2 of this Agreement, Elastic hereby grants to You, AT NO CHARGE and + for so long as you are not in breach of any provision of this Agreement, a + License to the Basic Features and Functions of the Elastic Software. + + 1.2 Reservation of Rights; Restrictions. As between Elastic and You, Elastic + and its licensors own all right, title and interest in and to the Elastic + Software, and except as expressly set forth in Sections 1.1, and 2.1 of this + Agreement, no other license to the Elastic Software is granted to You under + this Agreement, by implication, estoppel or otherwise. You agree not to: (i) + reverse engineer or decompile, decrypt, disassemble or otherwise reduce any + Elastic Software provided to You in Object Code, or any portion thereof, to + Source Code, except and only to the extent any such restriction is prohibited + by applicable law, (ii) except as expressly permitted in this Agreement, + prepare derivative works from, modify, copy or use the Elastic Software Object + Code or the Commercial Software Source Code in any manner; (iii) except as + expressly permitted in Section 1.1 above, transfer, sell, rent, lease, + distribute, sublicense, loan or otherwise transfer, Elastic Software Object + Code, in whole or in part, to any third party; (iv) use Elastic Software + Object Code for providing time-sharing services, any software-as-a-service, + service bureau services or as part of an application services provider or + other service offering (collectively, "SaaS Offering") where obtaining access + to the Elastic Software or the features and functions of the Elastic Software + is a primary reason or substantial motivation for users of the SaaS Offering + to access and/or use the SaaS Offering ("Prohibited SaaS Offering"); (v) + circumvent the limitations on use of Elastic Software provided to You in + Object Code format that are imposed or preserved by any License Key, or (vi) + alter or remove any Marks and Notices in the Elastic Software. If You have any + question as to whether a specific SaaS Offering constitutes a Prohibited SaaS + Offering, or are interested in obtaining Elastic's permission to engage in + commercial or non-commercial distribution of the Elastic Software, please + contact elastic_license@elastic.co. + + 1.3 Third Party Open Source Software. The Commercial Software may contain or + be provided with third party open source libraries, components, utilities and + other open source software (collectively, "Open Source Software"), which Open + Source Software may have applicable license terms as identified on a website + designated by Elastic. Notwithstanding anything to the contrary herein, use of + the Open Source Software shall be subject to the license terms and conditions + applicable to such Open Source Software, to the extent required by the + applicable licensor (which terms shall not restrict the license rights granted + to You hereunder, but may contain additional rights). To the extent any + condition of this Agreement conflicts with any license to the Open Source + Software, the Open Source Software license will govern with respect to such + Open Source Software only. Elastic may also separately provide you with + certain open source software that is licensed by Elastic. Your use of such + Elastic open source software will not be governed by this Agreement, but by + the applicable open source license terms. + +2. COMMERCIAL SOFTWARE SOURCE CODE + + 2.1 Limited License. 
Subject to the terms and conditions of Section 2.2 of + this Agreement, Elastic hereby grants to You, AT NO CHARGE and for so long as + you are not in breach of any provision of this Agreement, a limited, + non-exclusive, non-transferable, fully paid up royalty free right and license + to the Commercial Software in Source Code format, without the right to grant + or authorize sublicenses, to prepare Derivative Works of the Commercial + Software, provided You (i) do not hack the licensing mechanism, or otherwise + circumvent the intended limitations on the use of Elastic Software to enable + features other than Basic Features and Functions or those features You are + entitled to as part of a Subscription, and (ii) use the resulting object code + only for reasonable testing purposes. + + 2.2 Restrictions. Nothing in Section 2.1 grants You the right to (i) use the + Commercial Software Source Code other than in accordance with Section 2.1 + above, (ii) use a Derivative Work of the Commercial Software outside of a + Non-production Environment, in any production capacity, on a temporary or + permanent basis, or (iii) transfer, sell, rent, lease, distribute, sublicense, + loan or otherwise make available the Commercial Software Source Code, in whole + or in part, to any third party. Notwithstanding the foregoing, You may + maintain a copy of the repository in which the Source Code of the Commercial + Software resides and that copy may be publicly accessible, provided that you + include this Agreement with Your copy of the repository. + +3. TERMINATION + + 3.1 Termination. This Agreement will automatically terminate, whether or not + You receive notice of such Termination from Elastic, if You breach any of its + provisions. + + 3.2 Post Termination. Upon any termination of this Agreement, for any reason, + You shall promptly cease the use of the Elastic Software in Object Code format + and cease use of the Commercial Software in Source Code format. For the + avoidance of doubt, termination of this Agreement will not affect Your right + to use Elastic Software, in either Object Code or Source Code formats, made + available under the Apache License Version 2.0. + + 3.3 Survival. Sections 1.2, 2.2. 3.3, 4 and 5 shall survive any termination or + expiration of this Agreement. + +4. DISCLAIMER OF WARRANTIES AND LIMITATION OF LIABILITY + + 4.1 Disclaimer of Warranties. TO THE MAXIMUM EXTENT PERMITTED UNDER APPLICABLE + LAW, THE ELASTIC SOFTWARE IS PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND, + AND ELASTIC AND ITS LICENSORS MAKE NO WARRANTIES WHETHER EXPRESSED, IMPLIED OR + STATUTORY REGARDING OR RELATING TO THE ELASTIC SOFTWARE. TO THE MAXIMUM EXTENT + PERMITTED UNDER APPLICABLE LAW, ELASTIC AND ITS LICENSORS SPECIFICALLY + DISCLAIM ALL IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR + PURPOSE AND NON-INFRINGEMENT WITH RESPECT TO THE ELASTIC SOFTWARE, AND WITH + RESPECT TO THE USE OF THE FOREGOING. FURTHER, ELASTIC DOES NOT WARRANT RESULTS + OF USE OR THAT THE ELASTIC SOFTWARE WILL BE ERROR FREE OR THAT THE USE OF THE + ELASTIC SOFTWARE WILL BE UNINTERRUPTED. + + 4.2 Limitation of Liability. 
IN NO EVENT SHALL ELASTIC OR ITS LICENSORS BE + LIABLE TO YOU OR ANY THIRD PARTY FOR ANY DIRECT OR INDIRECT DAMAGES, + INCLUDING, WITHOUT LIMITATION, FOR ANY LOSS OF PROFITS, LOSS OF USE, BUSINESS + INTERRUPTION, LOSS OF DATA, COST OF SUBSTITUTE GOODS OR SERVICES, OR FOR ANY + SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES OF ANY KIND, IN CONNECTION WITH + OR ARISING OUT OF THE USE OR INABILITY TO USE THE ELASTIC SOFTWARE, OR THE + PERFORMANCE OF OR FAILURE TO PERFORM THIS AGREEMENT, WHETHER ALLEGED AS A + BREACH OF CONTRACT OR TORTIOUS CONDUCT, INCLUDING NEGLIGENCE, EVEN IF ELASTIC + HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. + +5. MISCELLANEOUS + + This Agreement completely and exclusively states the entire agreement of the + parties regarding the subject matter herein, and it supersedes, and its terms + govern, all prior proposals, agreements, or other communications between the + parties, oral or written, regarding such subject matter. This Agreement may be + modified by Elastic from time to time, and any such modifications will be + effective upon the "Posted Date" set forth at the top of the modified + Agreement. If any provision hereof is held unenforceable, this Agreement will + continue without said provision and be interpreted to reflect the original + intent of the parties. This Agreement and any non-contractual obligation + arising out of or in connection with it, is governed exclusively by Dutch law. + This Agreement shall not be governed by the 1980 UN Convention on Contracts + for the International Sale of Goods. All disputes arising out of or in + connection with this Agreement, including its existence and validity, shall be + resolved by the courts with jurisdiction in Amsterdam, The Netherlands, except + where mandatory law provides for the courts at another location in The + Netherlands to have jurisdiction. The parties hereby irrevocably waive any and + all claims and defenses either might otherwise have in any such action or + proceeding in any of such courts based upon any alleged lack of personal + jurisdiction, improper venue, forum non conveniens or any similar claim or + defense. A breach or threatened breach, by You of Section 2 may cause + irreparable harm for which damages at law may not provide adequate relief, and + therefore Elastic shall be entitled to seek injunctive relief without being + required to post a bond. You may not assign this Agreement (including by + operation of law in connection with a merger or acquisition), in whole or in + part to any third party without the prior written consent of Elastic, which + may be withheld or granted by Elastic in its sole and absolute discretion. + Any assignment in violation of the preceding sentence is void. Notices to + Elastic may also be sent to legal@elastic.co. + +6. DEFINITIONS + + The following terms have the meanings ascribed: + + 6.1 "Affiliate" means, with respect to a party, any entity that controls, is + controlled by, or which is under common control with, such party, where + "control" means ownership of at least fifty percent (50%) of the outstanding + voting shares of the entity, or the contractual right to establish policy for, + and manage the operations of, the entity. + + 6.2 "Basic Features and Functions" means those features and functions of the + Elastic Software that are eligible for use under a Basic license, as set forth + at https://www.elastic.co/subscriptions, as may be modified by Elastic from + time to time. 
+ + 6.3 "Commercial Software" means the Elastic Software Source Code in any file + containing a header stating the contents are subject to the Elastic License or + which is contained in the repository folder labeled "x-pack", unless a LICENSE + file present in the directory subtree declares a different license. + + 6.4 "Derivative Work of the Commercial Software" means, for purposes of this + Agreement, any modification(s) or enhancement(s) to the Commercial Software, + which represent, as a whole, an original work of authorship. + + 6.5 "License" means a limited, non-exclusive, non-transferable, fully paid up, + royalty free, right and license, without the right to grant or authorize + sublicenses, solely for Your internal business operations to (i) install and + use the applicable Features and Functions of the Elastic Software in Object + Code, and (ii) permit Contractors and Your Affiliates to use the Elastic + software as set forth in (i) above, provided that such use by Contractors must + be solely for Your benefit and/or the benefit of Your Affiliates, and You + shall be responsible for all acts and omissions of such Contractors and + Affiliates in connection with their use of the Elastic software that are + contrary to the terms and conditions of this Agreement. + + 6.6 "License Key" means a sequence of bytes, including but not limited to a + JSON blob, that is used to enable certain features and functions of the + Elastic Software. + + 6.7 "Marks and Notices" means all Elastic trademarks, trade names, logos and + notices present on the Documentation as originally provided by Elastic. + + 6.8 "Non-production Environment" means an environment for development, testing + or quality assurance, where software is not used for production purposes. + + 6.9 "Object Code" means any form resulting from mechanical transformation or + translation of Source Code form, including but not limited to compiled object + code, generated documentation, and conversions to other media types. + + 6.10 "Source Code" means the preferred form of computer software for making + modifications, including but not limited to software source code, + documentation source, and configuration files. + + 6.11 "Subscription" means the right to receive Support Services and a License + to the Commercial Software. + + +GOVERNMENT END USER ADDENDUM TO THE ELASTIC LICENSE AGREEMENT + + This ADDENDUM TO THE ELASTIC LICENSE AGREEMENT (this "Addendum") applies +only to U.S. Federal Government, State Government, and Local Government +entities ("Government End Users") of the Elastic Software. This Addendum is +subject to, and hereby incorporated into, the Elastic License Agreement, +which is being entered into as of even date herewith, by Elastic and You (the +"Agreement"). This Addendum sets forth additional terms and conditions +related to Your use of the Elastic Software. Capitalized terms not defined in +this Addendum have the meaning set forth in the Agreement. + + 1. LIMITED LICENSE TO DISTRIBUTE (DSOP ONLY). 
Subject to the terms and
+conditions of the Agreement (including this Addendum), Elastic grants the
+Department of Defense Enterprise DevSecOps Initiative (DSOP) a royalty-free,
+non-exclusive, non-transferable, limited license to reproduce and distribute
+the Elastic Software solely through a software distribution repository
+controlled and managed by DSOP, provided that DSOP: (i) distributes the
+Elastic Software complete and unmodified, inclusive of the Agreement
+(including this Addendum) and (ii) does not remove or alter any proprietary
+legends or notices contained in the Elastic Software.
+
+  2. CHOICE OF LAW. The choice of law and venue provisions set forth shall
+prevail over those set forth in Section 5 of the Agreement.
+
+    "For U.S. Federal Government Entity End Users. This Agreement and any
+    non-contractual obligation arising out of or in connection with it, is
+    governed exclusively by U.S. Federal law. To the extent permitted by
+    federal law, the laws of the State of Delaware (excluding Delaware choice
+    of law rules) will apply in the absence of applicable federal law.
+
+    For State and Local Government Entity End Users. This Agreement and any
+    non-contractual obligation arising out of or in connection with it, is
+    governed exclusively by the laws of the state in which you are located
+    without reference to conflict of laws. Furthermore, the Parties agree that
+    the Uniform Computer Information Transactions Act or any version thereof,
+    adopted by any state in any form ('UCITA'), shall not apply to this
+    Agreement and, to the extent that UCITA is applicable, the Parties agree to
+    opt out of the applicability of UCITA pursuant to the opt-out provision(s)
+    contained therein."
+
+  3. ELASTIC LICENSE MODIFICATION. Section 5 of the Agreement is hereby
+amended to replace
+
+    "This Agreement may be modified by Elastic from time to time, and any
+    such modifications will be effective upon the "Posted Date" set forth at
+    the top of the modified Agreement."
+
+  with:
+
+    "This Agreement may be modified by Elastic from time to time; provided,
+    however, that any such modifications shall apply only to Elastic Software
+    that is installed after the "Posted Date" set forth at the top of the
+    modified Agreement."
+
+V100820.0
diff --git a/dev-tools/packaging/files/ironbank/config/docker-entrypoint b/dev-tools/packaging/files/ironbank/config/docker-entrypoint
new file mode 100644
index 00000000000..7ebe21745f4
--- /dev/null
+++ b/dev-tools/packaging/files/ironbank/config/docker-entrypoint
@@ -0,0 +1,11 @@
+#!/bin/bash
+
+set -eo pipefail
+
+# For information on the environment variables that can be passed into the
+# container and the options that are available, run:
+#
+# `./elastic-agent container --help`
+#
+
+elastic-agent container "$@"
diff --git a/dev-tools/packaging/templates/ironbank/Dockerfile.tmpl b/dev-tools/packaging/templates/ironbank/Dockerfile.tmpl
new file mode 100644
index 00000000000..04c4dfde930
--- /dev/null
+++ b/dev-tools/packaging/templates/ironbank/Dockerfile.tmpl
@@ -0,0 +1,90 @@
+################################################################################
+# Build stage 0
+# Extract Elastic Agent and make various file manipulations.
+################################################################################
+ARG BASE_REGISTRY=registry1.dsop.io
+ARG BASE_IMAGE=ironbank/redhat/ubi/ubi8
+ARG BASE_TAG=8.6
+
+FROM ${BASE_REGISTRY}/${BASE_IMAGE}:${BASE_TAG} as prep_files
+
+ARG ELASTIC_STACK={{ beat_version }}
+ARG ELASTIC_PRODUCT=elastic-agent
+ARG OS_AND_ARCH=linux-x86_64
+
+RUN mkdir /usr/share/${ELASTIC_PRODUCT}
+WORKDIR /usr/share/${ELASTIC_PRODUCT}
+COPY --chown=1000:0 ${ELASTIC_PRODUCT}-${ELASTIC_STACK}-${OS_AND_ARCH}.tar.gz .
+RUN tar --strip-components=1 -zxf ${ELASTIC_PRODUCT}-${ELASTIC_STACK}-${OS_AND_ARCH}.tar.gz \
+    && rm ${ELASTIC_PRODUCT}-${ELASTIC_STACK}-${OS_AND_ARCH}.tar.gz
+
+# Support arbitrary user ids
+# Ensure that group permissions are the same as user permissions.
+# This will help when relying on GID-0 to run the Elastic Agent, rather than UID-1000.
+# OpenShift does this, for example.
+# REF: https://docs.okd.io/latest/openshift_images/create-images.html
+RUN chmod -R g=u /usr/share/${ELASTIC_PRODUCT}
+
+# Create auxiliary folders and assign default permissions.
+RUN mkdir -p /usr/share/${ELASTIC_PRODUCT}/data /usr/share/${ELASTIC_PRODUCT}/logs && \
+    chown -R root:root /usr/share/${ELASTIC_PRODUCT} && \
+    find /usr/share/${ELASTIC_PRODUCT} -type d -exec chmod 0750 {} \; && \
+    find /usr/share/${ELASTIC_PRODUCT} -type f -exec chmod 0640 {} \; && \
+    chmod 0750 /usr/share/${ELASTIC_PRODUCT}/${ELASTIC_PRODUCT} && \
+    chmod 0770 /usr/share/${ELASTIC_PRODUCT}/data /usr/share/${ELASTIC_PRODUCT}/logs
+
+################################################################################
+# Build stage 1
+# Copy prepared files from the previous stage and complete the image.
+################################################################################
+FROM ${BASE_REGISTRY}/${BASE_IMAGE}:${BASE_TAG}
+
+ARG ELASTIC_PRODUCT=elastic-agent
+
+COPY LICENSE /licenses/elastic-${ELASTIC_PRODUCT}
+
+# Add a dumb init process
+COPY tinit /tinit
+RUN chmod +x /tinit
+
+# Bring in product from the initial stage.
+COPY --from=prep_files --chown=1000:0 /usr/share/${ELASTIC_PRODUCT} /usr/share/${ELASTIC_PRODUCT}
+WORKDIR /usr/share/${ELASTIC_PRODUCT}
+RUN ln -s /usr/share/${ELASTIC_PRODUCT} /opt/${ELASTIC_PRODUCT}
+
+ENV ELASTIC_CONTAINER="true"
+RUN ln -s /usr/share/${ELASTIC_PRODUCT}/${ELASTIC_PRODUCT} /usr/bin/${ELASTIC_PRODUCT}
+
+# Support arbitrary user ids
+# Ensure gid 0 write permissions for OpenShift.
+RUN chmod -R g+w /usr/share/${ELASTIC_PRODUCT}
+
+# The config file ("${ELASTIC_PRODUCT}.yml") must only be writable by root and the root group;
+# this is needed in some configurations where the container has to run as root
+RUN chown root:root /usr/share/${ELASTIC_PRODUCT}/${ELASTIC_PRODUCT}.yml \
+    && chmod go-w /usr/share/${ELASTIC_PRODUCT}/${ELASTIC_PRODUCT}.yml
+
+# Remove the suid bit everywhere to mitigate "Stack Clash"
+RUN find / -xdev -perm -4000 -exec chmod u-s {} +
+
+# Provide a non-root user to run the process.
+RUN groupadd --gid 1000 ${ELASTIC_PRODUCT} && useradd --uid 1000 --gid 1000 --groups 0 --home-dir /usr/share/${ELASTIC_PRODUCT} --no-create-home ${ELASTIC_PRODUCT}
+
+# Elastic Agent permissions
+RUN find /usr/share/elastic-agent/data -type d -exec chmod 0770 {} \; && \
+    find /usr/share/elastic-agent/data -type f -exec chmod 0660 {} \; && \
+    chmod +x /usr/share/elastic-agent/data/elastic-agent-*/elastic-agent
+
+COPY jq /usr/local/bin
+RUN chown root:root /usr/local/bin/jq && chmod 0755 /usr/local/bin/jq
+
+COPY config/docker-entrypoint /usr/local/bin/docker-entrypoint
+RUN chmod 755 /usr/local/bin/docker-entrypoint
+
+USER ${ELASTIC_PRODUCT}
+ENV ELASTIC_PRODUCT=${ELASTIC_PRODUCT}
+
+ENTRYPOINT ["/tinit", "--", "/usr/local/bin/docker-entrypoint"]
+CMD [""]
+
+HEALTHCHECK --interval=10s --timeout=5s --start-period=1m --retries=5 CMD test -w '/tmp/elastic-agent/elastic-agent.sock'
diff --git a/dev-tools/packaging/templates/ironbank/README.md.tmpl b/dev-tools/packaging/templates/ironbank/README.md.tmpl
new file mode 100644
index 00000000000..271fdb8c0d7
--- /dev/null
+++ b/dev-tools/packaging/templates/ironbank/README.md.tmpl
@@ -0,0 +1,43 @@
+# elastic-agent
+
+**elastic-agent** is a single, unified way to add monitoring for logs, metrics, and other types of data to each host. A single agent makes it easier and faster to deploy monitoring across your infrastructure. The agent’s single, unified configuration makes it easier to add integrations for new data sources.
+
+For more information about elastic-agent, please visit
+https://www.elastic.co/guide/en/ingest-management/7.17/index.html.
+
+---
+
+**NOTE**
+
+This functionality is in beta and is subject to change. The design and code are less mature than official GA features and are being provided as-is with no warranties. Beta features are not subject to the support SLA of official GA features.
+
+---
+
+### Installation instructions
+
+Please follow the documentation on [Quick start](https://www.elastic.co/guide/en/fleet/{{ .MajorMinor }}/fleet-elastic-agent-quick-start.html).
+
+### Where to file issues and PRs
+
+- [Issues](https://github.com/elastic/elastic-agent/issues)
+- [PRs](https://github.com/elastic/elastic-agent/pulls)
+
+### DoD Restrictions
+
+### Where to get help
+
+- [elastic-agent Discuss Forums](https://discuss.elastic.co/tags/c/elastic-stack/beats/28/elastic-agent)
+- [elastic-agent Documentation](https://www.elastic.co/guide/en/ingest-management/current/index.html)
+
+### Still need help?
+
+You can learn more about the Elastic Community and how to get more help
+by visiting [Elastic Community](https://www.elastic.co/community).
+
+This software is governed by the [Elastic
+License](https://github.com/elastic/beats/blob/{{ .MajorMinor }}/licenses/ELASTIC-LICENSE.txt),
+and includes the full set of [free
+features](https://www.elastic.co/subscriptions).
+
+View the detailed release notes
+[here](https://www.elastic.co/guide/en/beats/libbeat/current/release-notes-{{ beat_version }}.html).
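+
+### Configuration sketch
+
+The image entrypoint wraps `elastic-agent container`, which is driven by
+environment variables. As a minimal sketch only (the variable names below are
+the ones used by the Kubernetes manifests in this repository; the token is a
+placeholder you must supply), Fleet enrollment could look like:
+
+```yaml
+# Hypothetical compose-style service fragment
+environment:
+  - FLEET_ENROLL=1                       # enroll into Fleet server on start
+  - FLEET_URL=https://fleet-server:8220  # Fleet Server URL to enroll into
+  - FLEET_ENROLLMENT_TOKEN=<token>       # enrollment token generated in Kibana
+```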
diff --git a/dev-tools/packaging/templates/ironbank/hardening_manifest.yaml.tmpl b/dev-tools/packaging/templates/ironbank/hardening_manifest.yaml.tmpl
new file mode 100644
index 00000000000..3c753caa0fb
--- /dev/null
+++ b/dev-tools/packaging/templates/ironbank/hardening_manifest.yaml.tmpl
@@ -0,0 +1,68 @@
+---
+apiVersion: v1
+
+# The repository name in registry1, excluding /ironbank/
+name: "elastic/beats/elastic-agent"
+
+# List of tags to push for the repository in registry1
+# The most specific version should be the first tag and will be shown
+# on ironbank.dsop.io
+tags:
+- "{{ beat_version }}"
+- "latest"
+
+# Build args passed to Dockerfile ARGs
+args:
+  BASE_IMAGE: "redhat/ubi/ubi8"
+  BASE_TAG: "8.6"
+  ELASTIC_STACK: "{{ beat_version }}"
+  ELASTIC_PRODUCT: "elastic-agent"
+
+# Docker image labels
+labels:
+  org.opencontainers.image.title: "elastic-agent"
+  ## Human-readable description of the software packaged in the image
+  org.opencontainers.image.description: "elastic-agent is a single, unified way to add monitoring for logs, metrics, and other types of data to each host"
+  ## License(s) under which contained software is distributed
+  org.opencontainers.image.licenses: "Elastic License"
+  ## URL to find more information on the image
+  org.opencontainers.image.url: "https://www.elastic.co/products/beats/elastic-agent"
+  ## Name of the distributing entity, organization or individual
+  org.opencontainers.image.vendor: "Elastic"
+  org.opencontainers.image.version: "{{ beat_version }}"
+  ## Keywords to help with search (ex. "cicd,gitops,golang")
+  mil.dso.ironbank.image.keywords: "log,metrics,monitoring,observability,o11y,oblt,beats,elastic,elasticsearch,golang"
+  ## This value can be "opensource" or "commercial"
+  mil.dso.ironbank.image.type: "commercial"
+  ## Product the image belongs to for grouping multiple images
+  mil.dso.ironbank.product.name: "beats"
+
+# List of resources to make available to the offline build context
+resources:
+  - filename: "elastic-agent-{{ beat_version }}-linux-x86_64.tar.gz"
+    url: "/elastic-agent-{{ beat_version }}-linux-x86_64.tar.gz"
+    validation:
+      type: "sha512"
+      value: ""
+  - filename: tinit
+    url: https://github.com/krallin/tini/releases/download/v0.19.0/tini-amd64
+    validation:
+      type: sha256
+      value: 93dcc18adc78c65a028a84799ecf8ad40c936fdfc5f2a57b1acda5a8117fa82c
+  - filename: jq
+    url: https://github.com/stedolan/jq/releases/download/jq-1.6/jq-linux64
+    validation:
+      type: sha256
+      value: af986793a515d500ab2d35f8d2aecd656e764504b789b66d7e1a0b727a124c44
+
+# List of project maintainers
+maintainers:
+  - email: "nassim.kammah@elastic.co"
+    name: "Nassim Kammah"
+    username: "nassim.kammah"
+  - email: "ivan.fernandez@elastic.co"
+    name: "Ivan Fernandez Calvo"
+    username: "ivan.fernandez"
+  - email: "victor.martinez@elastic.co"
+    name: "Victor Martinez"
+    username: "victor.martinez"
diff --git a/magefile.go b/magefile.go
index aba39385159..a215f1639d7 100644
--- a/magefile.go
+++ b/magefile.go
@@ -817,3 +817,111 @@ func injectBuildVars(m map[string]string) {
 		m[k] = v
 	}
 }
+
+// Ironbank packages elastic-agent for the IronBank distribution, relying on
+// the binaries having already been built.
+//
+// Use SNAPSHOT=true to build snapshots.
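+//
+// As a sketch of the intended invocation (assuming mage's usual
+// case-insensitive target matching, so the exported Ironbank func runs as the
+// "ironbank" target from the repository root):
+//
+//	SNAPSHOT=true mage ironbank
+//
+// The context is assembled under build/ and then archived, together with a
+// .sha512 checksum, into build/distributions by the helpers below.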
+func Ironbank() error {
+	if runtime.GOARCH != "amd64" {
+		fmt.Printf(">> IronBank images are only supported for amd64 arch (%s is not supported)\n", runtime.GOARCH)
+		return nil
+	}
+	if err := prepareIronbankBuild(); err != nil {
+		return errors.Wrap(err, "failed to prepare the IronBank context")
+	}
+	if err := saveIronbank(); err != nil {
+		return errors.Wrap(err, "failed to save artifacts for IronBank")
+	}
+	return nil
+}
+
+func saveIronbank() error {
+	fmt.Println(">> saveIronbank: save the IronBank container context.")
+
+	ironbank := getIronbankContextName()
+	buildDir := filepath.Join("build", ironbank)
+	if _, err := os.Stat(buildDir); os.IsNotExist(err) {
+		return fmt.Errorf("cannot find the folder with the ironbank context: %+v", err)
+	}
+
+	distributionsDir := "build/distributions"
+	if _, err := os.Stat(distributionsDir); os.IsNotExist(err) {
+		err := os.MkdirAll(distributionsDir, 0750)
+		if err != nil {
+			return fmt.Errorf("cannot create folder for docker artifacts: %+v", err)
+		}
+	}
+
+	// Change into buildDir, where the ironbank context lives, so that the
+	// generated tar.gz does not contain the nested build folders.
+	wd, _ := os.Getwd()
+	os.Chdir(buildDir)
+	defer os.Chdir(wd)
+
+	// Write the tar.gz via a path two levels up: buildDir is two directories
+	// deep (build/<context>), so "../.." resolves back to the repository root.
+	tarGzFile := filepath.Join("..", "..", distributionsDir, ironbank+".tar.gz")
+
+	// Save the build context as a tar.gz artifact
+	err := devtools.Tar("./", tarGzFile)
+	if err != nil {
+		return fmt.Errorf("cannot compress the tar.gz file: %+v", err)
+	}
+
+	return errors.Wrap(devtools.CreateSHA512File(tarGzFile), "failed to create .sha512 file")
+}
+
+func getIronbankContextName() string {
+	version, _ := devtools.BeatQualifiedVersion()
+	defaultBinaryName := "{{.Name}}-ironbank-{{.Version}}{{if .Snapshot}}-SNAPSHOT{{end}}"
+	outputDir, _ := devtools.Expand(defaultBinaryName+"-docker-build-context", map[string]interface{}{
+		"Name":    "elastic-agent",
+		"Version": version,
+	})
+	return outputDir
+}
+
+func prepareIronbankBuild() error {
+	fmt.Println(">> prepareIronbankBuild: prepare the IronBank container context.")
+	buildDir := filepath.Join("build", getIronbankContextName())
+	templatesDir := filepath.Join("dev-tools", "packaging", "templates", "ironbank")
+
+	data := map[string]interface{}{
+		"MajorMinor": majorMinor(),
+	}
+
+	err := filepath.Walk(templatesDir, func(path string, info os.FileInfo, _ error) error {
+		if !info.IsDir() {
+			target := strings.TrimSuffix(
+				filepath.Join(buildDir, filepath.Base(path)),
+				".tmpl",
+			)
+
+			err := devtools.ExpandFile(path, target, data)
+			if err != nil {
+				return errors.Wrapf(err, "expanding template '%s' to '%s'", path, target)
+			}
+		}
+		return nil
+	})
+
+	if err != nil {
+		return fmt.Errorf("cannot create templates for the IronBank: %+v", err)
+	}
+
+	// copy files
+	sourcePath := filepath.Join("dev-tools", "packaging", "files", "ironbank")
+	if err := devtools.Copy(sourcePath, buildDir); err != nil {
+		return fmt.Errorf("cannot create files for the IronBank: %+v", err)
+	}
+	return nil
+}
+
+func majorMinor() string {
+	if v, _ := devtools.BeatQualifiedVersion(); v != "" {
+		parts := strings.SplitN(v, ".", 3)
+		return parts[0] + "." + parts[1]
+	}
+	return ""
+}

From f26b0eb53fa5b08c23a3145156cd4a245ad04f32 Mon Sep 17 00:00:00 2001
From: Pierre HILBERT
Date: Wed, 20 Jul 2022 21:01:21 +0200
Subject: [PATCH 056/180] Update README.md

Add the M1 variables to export to be able to build AMD images
---
 README.md | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/README.md b/README.md
index df27ac6f99a..2c0dbe31f69 100644
--- a/README.md
+++ b/README.md
@@ -10,6 +10,13 @@ Prerequisites:
 - [Docker](https://docs.docker.com/get-docker/)
 - [X-pack](https://github.com/elastic/beats/tree/main/x-pack) to pre-exist in the parent folder of the local Git repository checkout
 
+If you are on a Mac with an M1 chip, don't forget to export the following Docker variables to be able to build AMD images:
+```
+export DOCKER_BUILDKIT=0
+export COMPOSE_DOCKER_CLI_BUILD=0
+export DOCKER_DEFAULT_PLATFORM=linux/amd64
+```
+
 In Linux operating systems that you can not run docker as a root user you need to follow [linux-postinstall steps](https://docs.docker.com/engine/install/linux-postinstall/)
 
 ### Testing docker container

From d84b957de85441e42932b5dee0bd93b8d8e2076d Mon Sep 17 00:00:00 2001
From: Mariana Dima
Date: Thu, 21 Jul 2022 15:15:11 +0200
Subject: [PATCH 057/180] fix flaky (#730)

---
 .../pkg/artifact/download/http/downloader_test.go  | 14 +++++++++++---
 1 file changed, 11 insertions(+), 3 deletions(-)

diff --git a/internal/pkg/artifact/download/http/downloader_test.go b/internal/pkg/artifact/download/http/downloader_test.go
index 26164df3a49..aac16a60f5d 100644
--- a/internal/pkg/artifact/download/http/downloader_test.go
+++ b/internal/pkg/artifact/download/http/downloader_test.go
@@ -25,7 +25,6 @@ import (
 )
 
 func TestDownloadBodyError(t *testing.T) {
-	t.Skip("Skipping flaky test: https://github.com/elastic/elastic-agent/issues/640")
 	// This tests the scenario where the download encounters a network error
 	// part way through the download, while copying the response body.
 
@@ -65,9 +64,9 @@
 	}
 
 	require.GreaterOrEqual(t, len(log.info), 1, "download error not logged at info level")
-	assert.Equal(t, log.info[len(log.info)-1].record, "download from %s failed at %s @ %sps: %s")
+	assert.True(t, containsMessage(log.info, "download from %s failed at %s @ %sps: %s"))
 	require.GreaterOrEqual(t, len(log.warn), 1, "download error not logged at warn level")
-	assert.Equal(t, log.warn[len(log.warn)-1].record, "download from %s failed at %s @ %sps: %s")
+	assert.True(t, containsMessage(log.warn, "download from %s failed at %s @ %sps: %s"))
 }
 
 func TestDownloadLogProgressWithLength(t *testing.T) {
@@ -208,3 +207,12 @@ func (f *recordLogger) Warnf(record string, args ...interface{}) {
 	defer f.lock.Unlock()
 	f.warn = append(f.warn, logMessage{record, args})
 }
+
+func containsMessage(logs []logMessage, msg string) bool {
+	for _, item := range logs {
+		if item.record == msg {
+			return true
+		}
+	}
+	return false
+}

From be3c48e98171548f61f7aa4de16e48d1a288cff7 Mon Sep 17 00:00:00 2001
From: Tiago Queiroz
Date: Fri, 22 Jul 2022 15:20:14 +0200
Subject: [PATCH 058/180] Add filestream ID on standalone kubernetes manifest
 (#742)

This commit adds unique IDs for the filestream inputs used by the
Kubernetes integration in the Elastic-Agent standalone Kubernetes
configuration/manifest file.
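
For illustration, a sketch of what this produces for one of the inputs
(the name, id, and variables are taken verbatim from the manifest diff
below):

    - name: container-log
      id: container-log-${kubernetes.pod.name}-${kubernetes.container.id}
      type: filestream

Without a unique `id` per input, filestream state tracking can collide,
which is the data duplication problem referenced in the changelog entry
below.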
--- CHANGELOG.next.asciidoc | 1 + deploy/kubernetes/elastic-agent-standalone-kubernetes.yaml | 2 ++ .../elastic-agent-standalone-daemonset-configmap.yaml | 2 ++ 3 files changed, 5 insertions(+) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 6ee9674253e..75d574a23b5 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -110,6 +110,7 @@ - Allow ':' characters in dynamic variables {issue}624[624] {pull}680[680] - Allow the - char to appear as part of variable names in eql expressions. {issue}709[709] {pull}710[710] - Allow the / char in variable names in eql and transpiler. {issue}715[715] {pull}718[718] +- Fix data duplication for standalone agent on Kubernetes using the default manifest {issue-beats}31512[31512] {pull}742[742] ==== New features diff --git a/deploy/kubernetes/elastic-agent-standalone-kubernetes.yaml b/deploy/kubernetes/elastic-agent-standalone-kubernetes.yaml index ab360f19bcb..0c084c4a81b 100644 --- a/deploy/kubernetes/elastic-agent-standalone-kubernetes.yaml +++ b/deploy/kubernetes/elastic-agent-standalone-kubernetes.yaml @@ -227,6 +227,7 @@ data: fields: ecs.version: 1.12.0 - name: container-log + id: container-log-${kubernetes.pod.name}-${kubernetes.container.id} type: filestream use_output: default meta: @@ -252,6 +253,7 @@ data: paths: - /var/log/containers/*${kubernetes.container.id}.log - name: audit-log + id: audit-log type: filestream use_output: default meta: diff --git a/deploy/kubernetes/elastic-agent-standalone/elastic-agent-standalone-daemonset-configmap.yaml b/deploy/kubernetes/elastic-agent-standalone/elastic-agent-standalone-daemonset-configmap.yaml index 6894f32bbe4..0581585e2fb 100644 --- a/deploy/kubernetes/elastic-agent-standalone/elastic-agent-standalone-daemonset-configmap.yaml +++ b/deploy/kubernetes/elastic-agent-standalone/elastic-agent-standalone-daemonset-configmap.yaml @@ -227,6 +227,7 @@ data: fields: ecs.version: 1.12.0 - name: container-log + id: container-log-${kubernetes.pod.name}-${kubernetes.container.id} type: filestream use_output: default meta: @@ -252,6 +253,7 @@ data: paths: - /var/log/containers/*${kubernetes.container.id}.log - name: audit-log + id: audit-log type: filestream use_output: default meta: From 83c4fa2cad2575103407df3e2e8ae0e8207e1dbc Mon Sep 17 00:00:00 2001 From: Michel Laterman <82832767+michel-laterman@users.noreply.github.com> Date: Fri, 22 Jul 2022 13:33:59 -0700 Subject: [PATCH 059/180] Alter github action to run on different OSs (#769) Alter the linter action to run on different OSs instead of on linux with the $GOOS env var. --- .github/workflows/golangci-lint.yml | 14 ++------------ internal/pkg/agent/application/secret/secret.go | 1 + 2 files changed, 3 insertions(+), 12 deletions(-) diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml index 79a22cbabc5..8079fe1c673 100644 --- a/.github/workflows/golangci-lint.yml +++ b/.github/workflows/golangci-lint.yml @@ -14,18 +14,10 @@ jobs: golangci: strategy: matrix: - include: - - GOOS: windows - - GOOS: linux - - GOOS: darwin + os: [ ubuntu-latest, macos-latest, windows-latest ] name: lint - runs-on: ubuntu-latest + runs-on: ${{ matrix.os }} steps: - - name: Echo details - env: - GOOS: ${{ matrix.GOOS }} - run: echo Go GOOS=$GOOS - - uses: actions/checkout@v2 # Uses Go version from the repository. 
@@ -38,8 +30,6 @@ jobs: go-version: "${{ steps.goversion.outputs.version }}" - name: golangci-lint - env: - GOOS: ${{ matrix.GOOS }} uses: golangci/golangci-lint-action@v2 with: # Optional: version of golangci-lint to use in form of v1.2 or v1.2.3 or `latest` to use the latest version diff --git a/internal/pkg/agent/application/secret/secret.go b/internal/pkg/agent/application/secret/secret.go index cf690bf24e8..bd2ee546454 100644 --- a/internal/pkg/agent/application/secret/secret.go +++ b/internal/pkg/agent/application/secret/secret.go @@ -2,6 +2,7 @@ // or more contributor license agreements. Licensed under the Elastic License; // you may not use this file except in compliance with the Elastic License. +// Package secret manages application secrets. package secret import ( From c05ce638b80ff5b6c76c1242ba4e5506bc903ebe Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Mon, 25 Jul 2022 01:35:11 -0400 Subject: [PATCH 060/180] [Automation] Update elastic stack version to 8.4.0-d058e92f for testing (#771) Co-authored-by: apmmachine --- testing/environments/snapshot.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/testing/environments/snapshot.yml b/testing/environments/snapshot.yml index 9e2fd843b08..b8b6792b912 100644 --- a/testing/environments/snapshot.yml +++ b/testing/environments/snapshot.yml @@ -3,7 +3,7 @@ version: '2.3' services: elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:8.4.0-31269fd2-SNAPSHOT + image: docker.elastic.co/elasticsearch/elasticsearch:8.4.0-d058e92f-SNAPSHOT # When extend is used it merges healthcheck.tests, see: # https://github.com/docker/compose/issues/8962 # healthcheck: @@ -42,7 +42,7 @@ services: - ./docker/logstash/pki:/etc/pki:ro kibana: - image: docker.elastic.co/kibana/kibana:8.4.0-31269fd2-SNAPSHOT + image: docker.elastic.co/kibana/kibana:8.4.0-d058e92f-SNAPSHOT environment: - "ELASTICSEARCH_USERNAME=kibana_system_user" - "ELASTICSEARCH_PASSWORD=testing" From 623fe82552a34196e51a9ce1749bf4e37cb23ec5 Mon Sep 17 00:00:00 2001 From: Tetiana Kravchenko Date: Mon, 25 Jul 2022 10:40:25 +0200 Subject: [PATCH 061/180] elastic-agent manifests: add comments; add cloudnative team as a codeowner for the k8s manifests (#708) * managed elastic-agent: add comments; add cloudnative team as a codeowner for the k8s manifests Signed-off-by: Tetiana Kravchenko * add comments to the standalone elastic-agent, similar to the documentation we have https://www.elastic.co/guide/en/fleet/current/running-on-kubernetes-standalone.html Signed-off-by: Tetiana Kravchenko * Apply suggestions from code review Co-authored-by: Michael Katsoulis Co-authored-by: Andrew Gizas * remove comment for FLEET_ENROLLMENT_TOKEN; use Needed everywhere instead of Required Signed-off-by: Tetiana Kravchenko * rephrase regarding accessing kube-state-metrics when used third party tools, like kube-rbac-proxy Signed-off-by: Tetiana Kravchenko * run make check Signed-off-by: Tetiana Kravchenko * keep manifests in sync to pass ci check Signed-off-by: Tetiana Kravchenko * add info on where to find FLEET_URL and FLEET_ENROLLMENT_TOKEN Signed-off-by: Tetiana Kravchenko * add links to elastic-agent documentation Signed-off-by: Tetiana Kravchenko * update comment on FLEET_ENROLLMENT_TOKEN Signed-off-by: Tetiana Kravchenko Co-authored-by: Michael Katsoulis Co-authored-by: Andrew Gizas --- .github/CODEOWNERS | 4 +- .../elastic-agent-managed-kubernetes.yaml | 30 ++++- .../elastic-agent-managed-daemonset.yaml | 23 +++- 
.../elastic-agent-managed-role.yaml | 7 +- .../elastic-agent-standalone-kubernetes.yaml | 107 +++++++++++++++++- ...-agent-standalone-daemonset-configmap.yaml | 89 ++++++++++++++- .../elastic-agent-standalone-daemonset.yaml | 11 ++ .../elastic-agent-standalone-role.yaml | 7 +- 8 files changed, 253 insertions(+), 25 deletions(-) diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index bc147bf0680..d8bc0072d7b 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -1,2 +1,4 @@ -# Team responsable for Fleet Server +# Team responsible for Fleet Server * @elastic/elastic-agent-control-plane + +/deploy/kubernetes @elastic/obs-cloudnative-monitoring diff --git a/deploy/kubernetes/elastic-agent-managed-kubernetes.yaml b/deploy/kubernetes/elastic-agent-managed-kubernetes.yaml index 4771cf37727..1e2403f47a2 100644 --- a/deploy/kubernetes/elastic-agent-managed-kubernetes.yaml +++ b/deploy/kubernetes/elastic-agent-managed-kubernetes.yaml @@ -1,3 +1,4 @@ +# For more information refer to https://www.elastic.co/guide/en/fleet/current/running-on-kubernetes-managed-by-fleet.html apiVersion: apps/v1 kind: DaemonSet metadata: @@ -14,34 +15,41 @@ spec: labels: app: elastic-agent spec: + # Tolerations are needed to run Elastic Agent on Kubernetes master nodes. + # Agents running on master nodes collect metrics from the control plane components (scheduler, controller manager) of Kubernetes tolerations: - key: node-role.kubernetes.io/master effect: NoSchedule serviceAccountName: elastic-agent hostNetwork: true + # 'hostPID: true' enables the Elastic Security integration to observe all process exec events on the host. # Sharing the host process ID namespace gives visibility of all processes running on the same host. - # This enables the Elastic Security integration to observe all process exec events on the host. hostPID: true dnsPolicy: ClusterFirstWithHostNet containers: - name: elastic-agent image: docker.elastic.co/beats/elastic-agent:8.3.0 env: + # Set to 1 for enrollment into Fleet server. 
If not set, Elastic Agent is run in standalone mode - name: FLEET_ENROLL value: "1" - # Set to true in case of insecure or unverified HTTP + # Set to true to communicate with Fleet with either insecure HTTP or unverified HTTPS - name: FLEET_INSECURE value: "true" - # The ip:port pair of fleet server + # Fleet Server URL to enroll the Elastic Agent into + # FLEET_URL can be found in Kibana, go to Management > Fleet > Settings - name: FLEET_URL value: "https://fleet-server:8220" - # If left empty KIBANA_HOST, KIBANA_FLEET_USERNAME, KIBANA_FLEET_PASSWORD are needed + # Elasticsearch API key used to enroll Elastic Agents in Fleet (https://www.elastic.co/guide/en/fleet/current/fleet-enrollment-tokens.html#fleet-enrollment-tokens) + # If FLEET_ENROLLMENT_TOKEN is empty then KIBANA_HOST, KIBANA_FLEET_USERNAME, KIBANA_FLEET_PASSWORD are needed - name: FLEET_ENROLLMENT_TOKEN value: "" - name: KIBANA_HOST value: "http://kibana:5601" + # The basic authentication username used to connect to Kibana and retrieve a service_token to enable Fleet - name: KIBANA_FLEET_USERNAME value: "elastic" + # The basic authentication password used to connect to Kibana and retrieve a service_token to enable Fleet - name: KIBANA_FLEET_PASSWORD value: "changeme" - name: NODE_NAME @@ -104,21 +112,28 @@ spec: - name: varlog hostPath: path: /var/log + # Needed for cloudbeat - name: etc-kubernetes hostPath: path: /etc/kubernetes + # Needed for cloudbeat - name: var-lib hostPath: path: /var/lib + # Needed for cloudbeat - name: passwd hostPath: path: /etc/passwd + # Needed for cloudbeat - name: group hostPath: path: /etc/group + # Needed for cloudbeat - name: etcsysmd hostPath: path: /etc/systemd + # Mount /etc/machine-id from the host to determine host ID + # Needed for Elastic Security integration - name: etc-mid hostPath: path: /etc/machine-id @@ -180,6 +195,7 @@ rules: - pods - services - configmaps + # Needed for cloudbeat - serviceaccounts - persistentvolumes - persistentvolumeclaims @@ -211,11 +227,12 @@ rules: - jobs - cronjobs verbs: [ "get", "list", "watch" ] - # required for apiserver + # Needed for apiserver - nonResourceURLs: - "/metrics" verbs: - get + # Needed for cloudbeat - apiGroups: ["rbac.authorization.k8s.io"] resources: - clusterrolebindings @@ -223,6 +240,7 @@ rules: - rolebindings - roles verbs: ["get", "list", "watch"] + # Needed for cloudbeat - apiGroups: ["policy"] resources: - podsecuritypolicies @@ -232,7 +250,7 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: name: elastic-agent - # should be the namespace where elastic-agent is running + # Should be the namespace where elastic-agent is running namespace: kube-system labels: k8s-app: elastic-agent diff --git a/deploy/kubernetes/elastic-agent-managed/elastic-agent-managed-daemonset.yaml b/deploy/kubernetes/elastic-agent-managed/elastic-agent-managed-daemonset.yaml index 231b976fe71..c3c679efa36 100644 --- a/deploy/kubernetes/elastic-agent-managed/elastic-agent-managed-daemonset.yaml +++ b/deploy/kubernetes/elastic-agent-managed/elastic-agent-managed-daemonset.yaml @@ -1,3 +1,4 @@ +# For more information refer to https://www.elastic.co/guide/en/fleet/current/running-on-kubernetes-managed-by-fleet.html apiVersion: apps/v1 kind: DaemonSet metadata: @@ -14,34 +15,41 @@ spec: labels: app: elastic-agent spec: + # Tolerations are needed to run Elastic Agent on Kubernetes master nodes. 
+ # Agents running on master nodes collect metrics from the control plane components (scheduler, controller manager) of Kubernetes tolerations: - key: node-role.kubernetes.io/master effect: NoSchedule serviceAccountName: elastic-agent hostNetwork: true + # 'hostPID: true' enables the Elastic Security integration to observe all process exec events on the host. # Sharing the host process ID namespace gives visibility of all processes running on the same host. - # This enables the Elastic Security integration to observe all process exec events on the host. hostPID: true dnsPolicy: ClusterFirstWithHostNet containers: - name: elastic-agent image: docker.elastic.co/beats/elastic-agent:%VERSION% env: + # Set to 1 for enrollment into Fleet server. If not set, Elastic Agent is run in standalone mode - name: FLEET_ENROLL value: "1" - # Set to true in case of insecure or unverified HTTP + # Set to true to communicate with Fleet with either insecure HTTP or unverified HTTPS - name: FLEET_INSECURE value: "true" - # The ip:port pair of fleet server + # Fleet Server URL to enroll the Elastic Agent into + # FLEET_URL can be found in Kibana, go to Management > Fleet > Settings - name: FLEET_URL value: "https://fleet-server:8220" - # If left empty KIBANA_HOST, KIBANA_FLEET_USERNAME, KIBANA_FLEET_PASSWORD are needed + # Elasticsearch API key used to enroll Elastic Agents in Fleet (https://www.elastic.co/guide/en/fleet/current/fleet-enrollment-tokens.html#fleet-enrollment-tokens) + # If FLEET_ENROLLMENT_TOKEN is empty then KIBANA_HOST, KIBANA_FLEET_USERNAME, KIBANA_FLEET_PASSWORD are needed - name: FLEET_ENROLLMENT_TOKEN value: "" - name: KIBANA_HOST value: "http://kibana:5601" + # The basic authentication username used to connect to Kibana and retrieve a service_token to enable Fleet - name: KIBANA_FLEET_USERNAME value: "elastic" + # The basic authentication password used to connect to Kibana and retrieve a service_token to enable Fleet - name: KIBANA_FLEET_PASSWORD value: "changeme" - name: NODE_NAME @@ -104,21 +112,28 @@ spec: - name: varlog hostPath: path: /var/log + # Needed for cloudbeat - name: etc-kubernetes hostPath: path: /etc/kubernetes + # Needed for cloudbeat - name: var-lib hostPath: path: /var/lib + # Needed for cloudbeat - name: passwd hostPath: path: /etc/passwd + # Needed for cloudbeat - name: group hostPath: path: /etc/group + # Needed for cloudbeat - name: etcsysmd hostPath: path: /etc/systemd + # Mount /etc/machine-id from the host to determine host ID + # Needed for Elastic Security integration - name: etc-mid hostPath: path: /etc/machine-id diff --git a/deploy/kubernetes/elastic-agent-managed/elastic-agent-managed-role.yaml b/deploy/kubernetes/elastic-agent-managed/elastic-agent-managed-role.yaml index 0ef5b850782..0d961215f4e 100644 --- a/deploy/kubernetes/elastic-agent-managed/elastic-agent-managed-role.yaml +++ b/deploy/kubernetes/elastic-agent-managed/elastic-agent-managed-role.yaml @@ -13,6 +13,7 @@ rules: - pods - services - configmaps + # Needed for cloudbeat - serviceaccounts - persistentvolumes - persistentvolumeclaims @@ -44,11 +45,12 @@ rules: - jobs - cronjobs verbs: [ "get", "list", "watch" ] - # required for apiserver + # Needed for apiserver - nonResourceURLs: - "/metrics" verbs: - get + # Needed for cloudbeat - apiGroups: ["rbac.authorization.k8s.io"] resources: - clusterrolebindings @@ -56,6 +58,7 @@ rules: - rolebindings - roles verbs: ["get", "list", "watch"] + # Needed for cloudbeat - apiGroups: ["policy"] resources: - podsecuritypolicies @@ -65,7 +68,7 @@ 
apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: name: elastic-agent - # should be the namespace where elastic-agent is running + # Should be the namespace where elastic-agent is running namespace: kube-system labels: k8s-app: elastic-agent diff --git a/deploy/kubernetes/elastic-agent-standalone-kubernetes.yaml b/deploy/kubernetes/elastic-agent-standalone-kubernetes.yaml index 0c084c4a81b..0984f0dc8ac 100644 --- a/deploy/kubernetes/elastic-agent-standalone-kubernetes.yaml +++ b/deploy/kubernetes/elastic-agent-standalone-kubernetes.yaml @@ -1,3 +1,4 @@ +# For more information refer to https://www.elastic.co/guide/en/fleet/current/running-on-kubernetes-standalone.html apiVersion: v1 kind: ConfigMap metadata: @@ -63,7 +64,9 @@ data: hosts: - 'kube-state-metrics:8080' period: 10s - # If `https` is used to access `kube-state-metrics`, then to all `kubernetes.state_*` datasets should be added: + # Openshift: + # if to access `kube-state-metrics` are used third party tools, like kube-rbac-proxy or similar, that perform RBAC authorization + # and/or tls termination, then configuration below should be considered: # bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token # ssl.certificate_authorities: # - /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt @@ -76,6 +79,12 @@ data: hosts: - 'kube-state-metrics:8080' period: 10s + # Openshift: + # if to access `kube-state-metrics` are used third party tools, like kube-rbac-proxy or similar, that perform RBAC authorization + # and/or tls termination, then configuration below should be considered: + # bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + # ssl.certificate_authorities: + # - /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt - data_stream: dataset: kubernetes.state_daemonset type: metrics @@ -85,6 +94,12 @@ data: hosts: - 'kube-state-metrics:8080' period: 10s + # Openshift: + # if to access `kube-state-metrics` are used third party tools, like kube-rbac-proxy or similar, that perform RBAC authorization + # and/or tls termination, then configuration below should be considered: + # bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + # ssl.certificate_authorities: + # - /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt - data_stream: dataset: kubernetes.state_deployment type: metrics @@ -94,6 +109,12 @@ data: hosts: - 'kube-state-metrics:8080' period: 10s + # Openshift: + # if to access `kube-state-metrics` are used third party tools, like kube-rbac-proxy or similar, that perform RBAC authorization + # and/or tls termination, then configuration below should be considered: + # bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + # ssl.certificate_authorities: + # - /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt - data_stream: dataset: kubernetes.state_job type: metrics @@ -103,6 +124,12 @@ data: hosts: - 'kube-state-metrics:8080' period: 10s + # Openshift: + # if to access `kube-state-metrics` are used third party tools, like kube-rbac-proxy or similar, that perform RBAC authorization + # and/or tls termination, then configuration below should be considered: + # bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + # ssl.certificate_authorities: + # - /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt - data_stream: dataset: kubernetes.state_node type: metrics @@ -112,6 +139,12 @@ data: hosts: - 'kube-state-metrics:8080' period: 10s + # Openshift: + # if to access 
`kube-state-metrics` are used third party tools, like kube-rbac-proxy or similar, that perform RBAC authorization + # and/or tls termination, then configuration below should be considered: + # bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + # ssl.certificate_authorities: + # - /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt - data_stream: dataset: kubernetes.state_persistentvolume type: metrics @@ -121,6 +154,12 @@ data: hosts: - 'kube-state-metrics:8080' period: 10s + # Openshift: + # if to access `kube-state-metrics` are used third party tools, like kube-rbac-proxy or similar, that perform RBAC authorization + # and/or tls termination, then configuration below should be considered: + # bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + # ssl.certificate_authorities: + # - /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt - data_stream: dataset: kubernetes.state_persistentvolumeclaim type: metrics @@ -130,6 +169,12 @@ data: hosts: - 'kube-state-metrics:8080' period: 10s + # Openshift: + # if to access `kube-state-metrics` are used third party tools, like kube-rbac-proxy or similar, that perform RBAC authorization + # and/or tls termination, then configuration below should be considered: + # bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + # ssl.certificate_authorities: + # - /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt - data_stream: dataset: kubernetes.state_pod type: metrics @@ -139,6 +184,12 @@ data: hosts: - 'kube-state-metrics:8080' period: 10s + # Openshift: + # if to access `kube-state-metrics` are used third party tools, like kube-rbac-proxy or similar, that perform RBAC authorization + # and/or tls termination, then configuration below should be considered: + # bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + # ssl.certificate_authorities: + # - /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt - data_stream: dataset: kubernetes.state_replicaset type: metrics @@ -148,6 +199,12 @@ data: hosts: - 'kube-state-metrics:8080' period: 10s + # Openshift: + # if to access `kube-state-metrics` are used third party tools, like kube-rbac-proxy or similar, that perform RBAC authorization + # and/or tls termination, then configuration below should be considered: + # bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + # ssl.certificate_authorities: + # - /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt - data_stream: dataset: kubernetes.state_resourcequota type: metrics @@ -157,6 +214,12 @@ data: hosts: - 'kube-state-metrics:8080' period: 10s + # Openshift: + # if to access `kube-state-metrics` are used third party tools, like kube-rbac-proxy or similar, that perform RBAC authorization + # and/or tls termination, then configuration below should be considered: + # bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + # ssl.certificate_authorities: + # - /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt - data_stream: dataset: kubernetes.state_service type: metrics @@ -166,6 +229,12 @@ data: hosts: - 'kube-state-metrics:8080' period: 10s + # Openshift: + # if to access `kube-state-metrics` are used third party tools, like kube-rbac-proxy or similar, that perform RBAC authorization + # and/or tls termination, then configuration below should be considered: + # bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + # ssl.certificate_authorities: + # - 
/var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt - data_stream: dataset: kubernetes.state_statefulset type: metrics @@ -175,6 +244,12 @@ data: hosts: - 'kube-state-metrics:8080' period: 10s + # Openshift: + # if to access `kube-state-metrics` are used third party tools, like kube-rbac-proxy or similar, that perform RBAC authorization + # and/or tls termination, then configuration below should be considered: + # bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + # ssl.certificate_authorities: + # - /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt - data_stream: dataset: kubernetes.state_storageclass type: metrics @@ -184,6 +259,12 @@ data: hosts: - 'kube-state-metrics:8080' period: 10s + # Openshift: + # if to access `kube-state-metrics` are used third party tools, like kube-rbac-proxy or similar, that perform RBAC authorization + # and/or tls termination, then configuration below should be considered: + # bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + # ssl.certificate_authorities: + # - /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt - name: system-logs type: logfile use_output: default @@ -417,7 +498,7 @@ data: period: 10s ssl.verification_mode: none condition: ${kubernetes.labels.component} == 'kube-controller-manager' - # Openshift: + # On Openshift condition should be adjusted: # condition: ${kubernetes.labels.app} == 'kube-controller-manager' - data_stream: dataset: kubernetes.scheduler @@ -430,7 +511,7 @@ data: period: 10s ssl.verification_mode: none condition: ${kubernetes.labels.component} == 'kube-scheduler' - # Openshift: + # On Openshift condition should be adjusted: # condition: ${kubernetes.labels.app} == 'openshift-kube-scheduler' - data_stream: dataset: kubernetes.proxy @@ -439,7 +520,7 @@ data: - proxy hosts: - 'localhost:10249' - # Openshift: + # On Openshift port should be adjusted: # - 'localhost:29101' period: 10s - data_stream: @@ -559,6 +640,8 @@ spec: labels: app: elastic-agent-standalone spec: + # Tolerations are needed to run Elastic Agent on Kubernetes master nodes. + # Agents running on master nodes collect metrics from the control plane components (scheduler, controller manager) of Kubernetes tolerations: - key: node-role.kubernetes.io/master effect: NoSchedule @@ -573,10 +656,14 @@ spec: "-e", ] env: + # The basic authentication username used to connect to Elasticsearch + # This user needs the privileges required to publish events to Elasticsearch. 
- name: ES_USERNAME value: "elastic" + # The basic authentication password used to connect to Elasticsearch - name: ES_PASSWORD value: "" + # The Elasticsearch host to communicate with - name: ES_HOST value: "" - name: NODE_NAME @@ -644,18 +731,23 @@ spec: - name: varlog hostPath: path: /var/log + # Needed for cloudbeat - name: etc-kubernetes hostPath: path: /etc/kubernetes + # Needed for cloudbeat - name: var-lib hostPath: path: /var/lib + # Needed for cloudbeat - name: passwd hostPath: path: /etc/passwd + # Needed for cloudbeat - name: group hostPath: path: /etc/group + # Needed for cloudbeat - name: etcsysmd hostPath: path: /etc/systemd @@ -716,6 +808,7 @@ rules: - pods - services - configmaps + # Needed for cloudbeat - serviceaccounts - persistentvolumes - persistentvolumeclaims @@ -747,11 +840,12 @@ rules: - nodes/stats verbs: - get - # required for apiserver + # Needed for apiserver - nonResourceURLs: - "/metrics" verbs: - get + # Needed for cloudbeat - apiGroups: ["rbac.authorization.k8s.io"] resources: - clusterrolebindings @@ -759,6 +853,7 @@ rules: - rolebindings - roles verbs: ["get", "list", "watch"] + # Needed for cloudbeat - apiGroups: ["policy"] resources: - podsecuritypolicies @@ -768,7 +863,7 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: name: elastic-agent-standalone - # should be the namespace where elastic-agent is running + # Should be the namespace where elastic-agent is running namespace: kube-system labels: k8s-app: elastic-agent-standalone diff --git a/deploy/kubernetes/elastic-agent-standalone/elastic-agent-standalone-daemonset-configmap.yaml b/deploy/kubernetes/elastic-agent-standalone/elastic-agent-standalone-daemonset-configmap.yaml index 0581585e2fb..7048bf22adb 100644 --- a/deploy/kubernetes/elastic-agent-standalone/elastic-agent-standalone-daemonset-configmap.yaml +++ b/deploy/kubernetes/elastic-agent-standalone/elastic-agent-standalone-daemonset-configmap.yaml @@ -1,3 +1,4 @@ +# For more information refer to https://www.elastic.co/guide/en/fleet/current/running-on-kubernetes-standalone.html apiVersion: v1 kind: ConfigMap metadata: @@ -63,7 +64,9 @@ data: hosts: - 'kube-state-metrics:8080' period: 10s - # If `https` is used to access `kube-state-metrics`, then to all `kubernetes.state_*` datasets should be added: + # Openshift: + # if `kube-state-metrics` is accessed through third-party tools, like kube-rbac-proxy or similar, that perform RBAC authorization + # and/or TLS termination, then the configuration below should be considered: # bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token # ssl.certificate_authorities: # - /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt @@ -76,6 +79,12 @@ data: hosts: - 'kube-state-metrics:8080' period: 10s + # Openshift: + # if `kube-state-metrics` is accessed through third-party tools, like kube-rbac-proxy or similar, that perform RBAC authorization + # and/or TLS termination, then the configuration below should be considered: + # bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + # ssl.certificate_authorities: + # - /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt - data_stream: dataset: kubernetes.state_daemonset type: metrics @@ -85,6 +94,12 @@ data: hosts: - 'kube-state-metrics:8080' period: 10s + # Openshift: + # if `kube-state-metrics` is accessed through third-party tools, like kube-rbac-proxy or similar, that perform RBAC authorization + # and/or TLS termination, then the configuration below should be considered: + # bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + # ssl.certificate_authorities: + # - /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt - data_stream: dataset: kubernetes.state_deployment type: metrics @@ -94,6 +109,12 @@ data: hosts: - 'kube-state-metrics:8080' period: 10s + # Openshift: + # if `kube-state-metrics` is accessed through third-party tools, like kube-rbac-proxy or similar, that perform RBAC authorization + # and/or TLS termination, then the configuration below should be considered: + # bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + # ssl.certificate_authorities: + # - /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt - data_stream: dataset: kubernetes.state_job type: metrics @@ -103,6 +124,12 @@ data: hosts: - 'kube-state-metrics:8080' period: 10s + # Openshift: + # if `kube-state-metrics` is accessed through third-party tools, like kube-rbac-proxy or similar, that perform RBAC authorization + # and/or TLS termination, then the configuration below should be considered: + # bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + # ssl.certificate_authorities: + # - /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt - data_stream: dataset: kubernetes.state_node type: metrics @@ -112,6 +139,12 @@ data: hosts: - 'kube-state-metrics:8080' period: 10s + # Openshift: + # if `kube-state-metrics` is accessed through third-party tools, like kube-rbac-proxy or similar, that perform RBAC authorization + # and/or TLS termination, then the configuration below should be considered: + # bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + # ssl.certificate_authorities: + # - /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt - data_stream: dataset: kubernetes.state_persistentvolume type: metrics @@ -121,6 +154,12 @@ data: hosts: - 'kube-state-metrics:8080' period: 10s + # Openshift: + # if `kube-state-metrics` is accessed through third-party tools, like kube-rbac-proxy or similar, that perform RBAC authorization + # and/or TLS termination, then the configuration below should be considered: + # bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + # ssl.certificate_authorities: + # - /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt - data_stream: dataset: kubernetes.state_persistentvolumeclaim type: metrics @@ -130,6 +169,12 @@ data: hosts: - 'kube-state-metrics:8080' period: 10s + # Openshift: + # if `kube-state-metrics` is accessed through third-party tools, like kube-rbac-proxy or similar, that perform RBAC authorization + # and/or TLS termination, then the configuration below should be considered: + # bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + # ssl.certificate_authorities: + # - /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt - data_stream: dataset: kubernetes.state_pod type: metrics @@ -139,6 +184,12 @@ data: hosts: - 'kube-state-metrics:8080' period: 10s + # Openshift: + # if `kube-state-metrics` is accessed through third-party tools, like kube-rbac-proxy or similar, that perform RBAC authorization + # and/or TLS termination, then the configuration below should be considered: + # bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + # ssl.certificate_authorities: + # - /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt - data_stream: dataset: kubernetes.state_replicaset type: metrics @@ -148,6 +199,12 @@ data: hosts: - 'kube-state-metrics:8080' period: 10s + # Openshift: + # if `kube-state-metrics` is accessed through third-party tools, like kube-rbac-proxy or similar, that perform RBAC authorization + # and/or TLS termination, then the configuration below should be considered: + # bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + # ssl.certificate_authorities: + # - /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt - data_stream: dataset: kubernetes.state_resourcequota type: metrics @@ -157,6 +214,12 @@ data: hosts: - 'kube-state-metrics:8080' period: 10s + # Openshift: + # if `kube-state-metrics` is accessed through third-party tools, like kube-rbac-proxy or similar, that perform RBAC authorization + # and/or TLS termination, then the configuration below should be considered: + # bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + # ssl.certificate_authorities: + # - /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt - data_stream: dataset: kubernetes.state_service type: metrics @@ -166,6 +229,12 @@ data: hosts: - 'kube-state-metrics:8080' period: 10s + # Openshift: + # if `kube-state-metrics` is accessed through third-party tools, like kube-rbac-proxy or similar, that perform RBAC authorization + # and/or TLS termination, then the configuration below should be considered: + # bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + # ssl.certificate_authorities: + # - /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt - data_stream: dataset: kubernetes.state_statefulset type: metrics @@ -175,6 +244,12 @@ data: hosts: - 'kube-state-metrics:8080' period: 10s + # Openshift: + # if `kube-state-metrics` is accessed through third-party tools, like kube-rbac-proxy or similar, that perform RBAC authorization + # and/or TLS termination, then the configuration below should be considered: + # bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + # ssl.certificate_authorities: + # - /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt - data_stream: dataset: kubernetes.state_storageclass type: metrics @@ -184,6 +259,12 @@ data: hosts: - 'kube-state-metrics:8080' period: 10s + # Openshift: + # if `kube-state-metrics` is accessed through third-party tools, like kube-rbac-proxy or similar, that perform RBAC authorization + # and/or TLS termination, then the configuration below should be considered: + # bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + # ssl.certificate_authorities: + # - /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt - name: system-logs type: logfile use_output: default @@ -417,7 +498,7 @@ data: period: 10s ssl.verification_mode: none condition: ${kubernetes.labels.component} == 'kube-controller-manager' - # Openshift: + # On Openshift condition should be adjusted: # condition: ${kubernetes.labels.app} == 'kube-controller-manager' - data_stream: dataset: kubernetes.scheduler @@ -430,7 +511,7 @@ data: period: 10s ssl.verification_mode: none condition: ${kubernetes.labels.component} == 'kube-scheduler' - # Openshift: + # On Openshift condition should be adjusted: # condition: ${kubernetes.labels.app} == 'openshift-kube-scheduler' - data_stream: dataset: kubernetes.proxy metricsets: - proxy hosts: - 'localhost:10249' - # Openshift: + # On Openshift port should be adjusted: # - 'localhost:29101' period: 10s - data_stream: diff --git a/deploy/kubernetes/elastic-agent-standalone/elastic-agent-standalone-daemonset.yaml b/deploy/kubernetes/elastic-agent-standalone/elastic-agent-standalone-daemonset.yaml index 2a0f23107f1..0bf131ec8ea
100644 --- a/deploy/kubernetes/elastic-agent-standalone/elastic-agent-standalone-daemonset.yaml +++ b/deploy/kubernetes/elastic-agent-standalone/elastic-agent-standalone-daemonset.yaml @@ -14,6 +14,8 @@ spec: labels: app: elastic-agent-standalone spec: + # Tolerations are needed to run Elastic Agent on Kubernetes master nodes. + # Agents running on master nodes collect metrics from the control plane components (scheduler, controller manager) of Kubernetes tolerations: - key: node-role.kubernetes.io/master effect: NoSchedule @@ -28,10 +30,14 @@ spec: "-e", ] env: + # The basic authentication username used to connect to Elasticsearch + # This user needs the privileges required to publish events to Elasticsearch. - name: ES_USERNAME value: "elastic" + # The basic authentication password used to connect to Elasticsearch - name: ES_PASSWORD value: "" + # The Elasticsearch host to communicate with - name: ES_HOST value: "" - name: NODE_NAME @@ -99,18 +105,23 @@ spec: - name: varlog hostPath: path: /var/log + # Needed for cloudbeat - name: etc-kubernetes hostPath: path: /etc/kubernetes + # Needed for cloudbeat - name: var-lib hostPath: path: /var/lib + # Needed for cloudbeat - name: passwd hostPath: path: /etc/passwd + # Needed for cloudbeat - name: group hostPath: path: /etc/group + # Needed for cloudbeat - name: etcsysmd hostPath: path: /etc/systemd diff --git a/deploy/kubernetes/elastic-agent-standalone/elastic-agent-standalone-role.yaml b/deploy/kubernetes/elastic-agent-standalone/elastic-agent-standalone-role.yaml index b253f0520fe..8a644f3aadf 100644 --- a/deploy/kubernetes/elastic-agent-standalone/elastic-agent-standalone-role.yaml +++ b/deploy/kubernetes/elastic-agent-standalone/elastic-agent-standalone-role.yaml @@ -13,6 +13,7 @@ rules: - pods - services - configmaps + # Needed for cloudbeat - serviceaccounts - persistentvolumes - persistentvolumeclaims @@ -44,11 +45,12 @@ rules: - nodes/stats verbs: - get - # required for apiserver + # Needed for apiserver - nonResourceURLs: - "/metrics" verbs: - get + # Needed for cloudbeat - apiGroups: ["rbac.authorization.k8s.io"] resources: - clusterrolebindings @@ -56,6 +58,7 @@ rules: - rolebindings - roles verbs: ["get", "list", "watch"] + # Needed for cloudbeat - apiGroups: ["policy"] resources: - podsecuritypolicies @@ -65,7 +68,7 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: name: elastic-agent-standalone - # should be the namespace where elastic-agent is running + # Should be the namespace where elastic-agent is running namespace: kube-system labels: k8s-app: elastic-agent-standalone From 56f22161e677f5524241588264ce9a38abb21c97 Mon Sep 17 00:00:00 2001 From: Michal Pristas Date: Tue, 26 Jul 2022 14:17:59 +0200 Subject: [PATCH 062/180] [Elastic-Agent] Added source uri reloading (#686) --- .../pkg/agent/application/managed_mode.go | 2 + .../handlers/handler_action_policy_change.go | 2 +- .../handler_action_policy_change_test.go | 8 +-- internal/pkg/artifact/config.go | 53 ++++++++++++++++++- 4 files changed, 59 insertions(+), 6 deletions(-) diff --git a/internal/pkg/agent/application/managed_mode.go b/internal/pkg/agent/application/managed_mode.go index d334ae0198c..3f98e78fd62 100644 --- a/internal/pkg/agent/application/managed_mode.go +++ b/internal/pkg/agent/application/managed_mode.go @@ -31,6 +31,7 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/agent/operation" "github.com/elastic/elastic-agent/internal/pkg/agent/storage" 
"github.com/elastic/elastic-agent/internal/pkg/agent/storage/store" + "github.com/elastic/elastic-agent/internal/pkg/artifact" "github.com/elastic/elastic-agent/internal/pkg/capabilities" "github.com/elastic/elastic-agent/internal/pkg/composable" "github.com/elastic/elastic-agent/internal/pkg/config" @@ -157,6 +158,7 @@ func newManaged( }, caps, monitor, + artifact.NewReloader(cfg.Settings.DownloadConfig, log), ) if err != nil { return nil, err diff --git a/internal/pkg/agent/application/pipeline/actions/handlers/handler_action_policy_change.go b/internal/pkg/agent/application/pipeline/actions/handlers/handler_action_policy_change.go index 3775d12b352..ad75299e420 100644 --- a/internal/pkg/agent/application/pipeline/actions/handlers/handler_action_policy_change.go +++ b/internal/pkg/agent/application/pipeline/actions/handlers/handler_action_policy_change.go @@ -151,7 +151,7 @@ func (h *PolicyChange) handleFleetServerHosts(ctx context.Context, c *config.Con errors.TypeNetwork, errors.M("hosts", h.config.Fleet.Client.Hosts)) } // discard body for proper cancellation and connection reuse - io.Copy(ioutil.Discard, resp.Body) + _, _ = io.Copy(ioutil.Discard, resp.Body) resp.Body.Close() reader, err := fleetToReader(h.agentInfo, h.config) diff --git a/internal/pkg/agent/application/pipeline/actions/handlers/handler_action_policy_change_test.go b/internal/pkg/agent/application/pipeline/actions/handlers/handler_action_policy_change_test.go index d887e755154..e2d480ee6fe 100644 --- a/internal/pkg/agent/application/pipeline/actions/handlers/handler_action_policy_change_test.go +++ b/internal/pkg/agent/application/pipeline/actions/handlers/handler_action_policy_change_test.go @@ -44,7 +44,7 @@ func TestPolicyChange(t *testing.T) { conf := map[string]interface{}{"hello": "world"} action := &fleetapi.ActionPolicyChange{ - ActionID: "abc123", + ActionID: "TestPolicyChange-abc1", ActionType: "POLICY_CHANGE", Policy: conf, } @@ -69,7 +69,7 @@ func TestPolicyChange(t *testing.T) { conf := map[string]interface{}{"hello": "world"} action := &fleetapi.ActionPolicyChange{ - ActionID: "abc123", + ActionID: "TestPolicyChange-abc2", ActionType: "POLICY_CHANGE", Policy: conf, } @@ -100,7 +100,7 @@ func TestPolicyAcked(t *testing.T) { emitter := &mockEmitter{err: mockErr} config := map[string]interface{}{"hello": "world"} - actionID := "abc123" + actionID := "TestPolicyAcked-abc1" action := &fleetapi.ActionPolicyChange{ ActionID: actionID, ActionType: "POLICY_CHANGE", @@ -129,7 +129,7 @@ func TestPolicyAcked(t *testing.T) { emitter := &mockEmitter{} config := map[string]interface{}{"hello": "world"} - actionID := "abc123" + actionID := "TestPolicyAcked-abc2" action := &fleetapi.ActionPolicyChange{ ActionID: actionID, ActionType: "POLICY_CHANGE", diff --git a/internal/pkg/artifact/config.go b/internal/pkg/artifact/config.go index c190c02d239..fa57ca06870 100644 --- a/internal/pkg/artifact/config.go +++ b/internal/pkg/artifact/config.go @@ -11,12 +11,17 @@ import ( "github.com/elastic/elastic-agent-libs/transport/httpcommon" "github.com/elastic/elastic-agent/internal/pkg/agent/application/paths" + "github.com/elastic/elastic-agent/internal/pkg/agent/errors" + "github.com/elastic/elastic-agent/internal/pkg/config" + "github.com/elastic/elastic-agent/pkg/core/logger" ) const ( darwin = "darwin" linux = "linux" windows = "windows" + + defaultSourceURI = "https://artifacts.elastic.co/downloads/" ) // Config is a configuration used for 
verifier and downloader @@ -46,6 +51,52 @@ type Config struct { httpcommon.HTTPTransportSettings `config:",inline" yaml:",inline"` // Note: use anonymous struct for json inline } +type Reloader struct { + log *logger.Logger + cfg *Config +} + +func NewReloader(cfg *Config, log *logger.Logger) *Reloader { + return &Reloader{ + cfg: cfg, + log: log, + } +} + +func (r *Reloader) Reload(rawConfig *config.Config) error { + type reloadConfig struct { + // SourceURI: source of the artifacts, e.g. https://artifacts.elastic.co/downloads/ + SourceURI string `json:"agent.download.sourceURI" config:"agent.download.sourceURI"` + + // FleetSourceURI: source of the artifacts, e.g. https://artifacts.elastic.co/downloads/, coming from Fleet, which uses + // different naming. + FleetSourceURI string `json:"agent.download.source_uri" config:"agent.download.source_uri"` + } + cfg := &reloadConfig{} + if err := rawConfig.Unpack(&cfg); err != nil { + return errors.New(err, "failed to unpack config during reload") + } + + var newSourceURI string + if cfg.FleetSourceURI != "" { + // Fleet configuration takes precedence + newSourceURI = cfg.FleetSourceURI + } else if cfg.SourceURI != "" { + newSourceURI = cfg.SourceURI + } + + if newSourceURI != "" { + r.log.Infof("Source URI changed from %q to %q", r.cfg.SourceURI, newSourceURI) + r.cfg.SourceURI = newSourceURI + } else { + // source URI unset, reset to default + r.log.Infof("Source URI reset from %q to %q", r.cfg.SourceURI, defaultSourceURI) + r.cfg.SourceURI = defaultSourceURI + } + + return nil +} + // DefaultConfig creates a config with pre-set default values. func DefaultConfig() *Config { transport := httpcommon.DefaultHTTPTransportSettings() @@ -56,7 +107,7 @@ transport.Timeout = 10 * time.Minute return &Config{ - SourceURI: "https://artifacts.elastic.co/downloads/", + SourceURI: defaultSourceURI, TargetDirectory: paths.Downloads(), InstallPath: paths.Install(), HTTPTransportSettings: transport, From 3e877825b91cdde39f2ea94eabdd6275176b7b82 Mon Sep 17 00:00:00 2001 From: Michel Laterman <82832767+michel-laterman@users.noreply.github.com> Date: Tue, 26 Jul 2022 13:17:34 -0700 Subject: [PATCH 063/180] Update will clean up unneeded artifacts. (#752) * Update will clean up unneeded artifacts. The update process will clean up unneeded artifacts. When an update starts, all artifacts that do not have the current version number in their name will be removed. If artifact retrieval fails, downloaded artifacts are removed. On a successful upgrade, all contents of the downloads dir will be removed. * Clean up linter warnings * Wrap errors * Clean up tests * Fix passed version * Use os.RemoveAll --- CHANGELOG.next.asciidoc | 1 + .../pkg/agent/application/upgrade/cleanup.go | 36 +++++++++++++++ .../agent/application/upgrade/cleanup_test.go | 44 +++++++++++++++++++ .../pkg/agent/application/upgrade/upgrade.go | 20 +++++++++ 4 files changed, 101 insertions(+) create mode 100644 internal/pkg/agent/application/upgrade/cleanup.go create mode 100644 internal/pkg/agent/application/upgrade/cleanup_test.go diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 75d574a23b5..2361baf73f5 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -111,6 +111,7 @@ - Allow the - char to appear as part of variable names in eql expressions. {issue}709[709] {pull}710[710] - Allow the / char in variable names in eql and transpiler.
{issue}715[715] {pull}718[718] - Fix data duplication for standalone agent on Kubernetes using the default manifest {issue-beats}31512[31512] {pull}742[742] +- Agent updates will clean up unneeded artifacts. {issue}693[693] {issue}694[694] {pull}752[752] ==== New features diff --git a/internal/pkg/agent/application/upgrade/cleanup.go b/internal/pkg/agent/application/upgrade/cleanup.go new file mode 100644 index 00000000000..5e0618dfe78 --- /dev/null +++ b/internal/pkg/agent/application/upgrade/cleanup.go @@ -0,0 +1,36 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package upgrade + +import ( + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/hashicorp/go-multierror" + + "github.com/elastic/elastic-agent/internal/pkg/agent/application/paths" +) + +// preUpgradeCleanup will remove files that do not have the passed version number from the downloads directory. +func preUpgradeCleanup(version string) error { + files, err := os.ReadDir(paths.Downloads()) + if err != nil { + return fmt.Errorf("unable to read directory %q: %w", paths.Downloads(), err) + } + var rErr error + for _, file := range files { + if file.IsDir() { + continue + } + if !strings.Contains(file.Name(), version) { + if err := os.Remove(filepath.Join(paths.Downloads(), file.Name())); err != nil { + rErr = multierror.Append(rErr, fmt.Errorf("unable to remove file %q: %w", filepath.Join(paths.Downloads(), file.Name()), err)) + } + } + } + return rErr +} diff --git a/internal/pkg/agent/application/upgrade/cleanup_test.go b/internal/pkg/agent/application/upgrade/cleanup_test.go new file mode 100644 index 00000000000..736a9c42b3d --- /dev/null +++ b/internal/pkg/agent/application/upgrade/cleanup_test.go @@ -0,0 +1,44 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+ +package upgrade + +import ( + "os" + "path/filepath" + "testing" + + "github.com/elastic/elastic-agent/internal/pkg/agent/application/paths" + + "github.com/stretchr/testify/require" +) + +func setupDir(t *testing.T) { + t.Helper() + dir := t.TempDir() + paths.SetDownloads(dir) + + err := os.WriteFile(filepath.Join(dir, "test-8.3.0-file"), []byte("hello, world!"), 0600) + require.NoError(t, err) + err = os.WriteFile(filepath.Join(dir, "test-8.4.0-file"), []byte("hello, world!"), 0600) + require.NoError(t, err) + err = os.WriteFile(filepath.Join(dir, "test-8.5.0-file"), []byte("hello, world!"), 0600) + require.NoError(t, err) + err = os.WriteFile(filepath.Join(dir, "test-hash-file"), []byte("hello, world!"), 0600) + require.NoError(t, err) +} + +func TestPreUpgradeCleanup(t *testing.T) { + setupDir(t) + err := preUpgradeCleanup("8.4.0") + require.NoError(t, err) + + files, err := os.ReadDir(paths.Downloads()) + require.NoError(t, err) + require.Len(t, files, 1) + require.Equal(t, "test-8.4.0-file", files[0].Name()) + p, err := os.ReadFile(filepath.Join(paths.Downloads(), files[0].Name())) + require.NoError(t, err) + require.Equal(t, []byte("hello, world!"), p) +} diff --git a/internal/pkg/agent/application/upgrade/upgrade.go b/internal/pkg/agent/application/upgrade/upgrade.go index 1d370cb5301..ce811036176 100644 --- a/internal/pkg/agent/application/upgrade/upgrade.go +++ b/internal/pkg/agent/application/upgrade/upgrade.go @@ -126,6 +126,11 @@ func (u *Upgrader) Upgrade(ctx context.Context, a Action, reexecNow bool) (_ ree "running under control of the systems supervisor") } + err = preUpgradeCleanup(u.agentInfo.Version()) + if err != nil { + u.log.Errorf("Unable to clean downloads dir %q before update: %v", paths.Downloads(), err) + } + if u.caps != nil { if _, err := u.caps.Apply(a); errors.Is(err, capabilities.ErrBlocked) { return nil, nil @@ -137,6 +142,11 @@ func (u *Upgrader) Upgrade(ctx context.Context, a Action, reexecNow bool) (_ ree sourceURI := u.sourceURI(a.SourceURI()) archivePath, err := u.downloadArtifact(ctx, a.Version(), sourceURI) if err != nil { + // Run the same preUpgradeCleanup task to get rid of any newly downloaded files + // This may have an issue if users are upgrading to the same version number. 
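+ // (preUpgradeCleanup keeps files whose names contain the currently running version, so artifacts downloaded for that same version would not be removed)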
+ if dErr := preUpgradeCleanup(u.agentInfo.Version()); dErr != nil { + u.log.Errorf("Unable to remove file after verification failure: %v", dErr) + } return nil, err } @@ -180,10 +190,20 @@ func (u *Upgrader) Upgrade(ctx context.Context, a Action, reexecNow bool) (_ ree cb := shutdownCallback(u.log, paths.Home(), release.Version(), a.Version(), release.TrimCommit(newHash)) if reexecNow { + err = os.RemoveAll(paths.Downloads()) + if err != nil { + u.log.Errorf("Unable to clean downloads dir %q after update: %v", paths.Downloads(), err) + } u.reexec.ReExec(cb) return nil, nil } + // Clean everything from the downloads dir + err = os.RemoveAll(paths.Downloads()) + if err != nil { + u.log.Errorf("Unable to clean downloads dir %q after update: %v", paths.Downloads(), err) + } + return cb, nil } From 87f3798e1481387c766ddad037139dbf51e75ba4 Mon Sep 17 00:00:00 2001 From: Victor Martinez Date: Wed, 27 Jul 2022 09:21:29 +0100 Subject: [PATCH 064/180] ci: propagate e2e-testing errors (#695) --- .ci/Jenkinsfile | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.ci/Jenkinsfile b/.ci/Jenkinsfile index 9d24c6ea810..78078f79358 100644 --- a/.ci/Jenkinsfile +++ b/.ci/Jenkinsfile @@ -256,7 +256,9 @@ pipeline { elasticAgentVersion: "${env.BEAT_VERSION}-SNAPSHOT", gitHubCheckName: "e2e-tests", gitHubCheckRepo: env.REPO, - gitHubCheckSha1: env.GIT_BASE_COMMIT) + gitHubCheckSha1: env.GIT_BASE_COMMIT, + propagate: true, + wait: true) } } } From e87810336ea3f244feb76ee6130b365a67270b24 Mon Sep 17 00:00:00 2001 From: Elastic Machine Date: Wed, 27 Jul 2022 22:50:09 +0930 Subject: [PATCH 065/180] [Release] add-backport-next (#784) --- .mergify.yml | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/.mergify.yml b/.mergify.yml index 00b49806e47..3fe46362854 100644 --- a/.mergify.yml +++ b/.mergify.yml @@ -207,3 +207,16 @@ pull_request_rules: labels: - "backport" title: "[{{ destination_branch }}](backport #{{ number }}) {{ title }}" + - name: backport patches to 8.4 branch + conditions: + - merged + - label=backport-v8.4.0 + actions: + backport: + assignees: + - "{{ author }}" + branches: + - "8.4" + labels: + - "backport" + title: "[{{ destination_branch }}](backport #{{ number }}) {{ title }}" From 317e03116aa919d69be97242207ad11a28c826aa Mon Sep 17 00:00:00 2001 From: Pier-Hugues Pellerin Date: Wed, 27 Jul 2022 15:47:40 -0400 Subject: [PATCH 066/180] Update main to 8.5.0 (#793) --- version/version.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/version/version.go b/version/version.go index 07f3cb8046a..f5101a34efa 100644 --- a/version/version.go +++ b/version/version.go @@ -4,4 +4,4 @@ package version -const defaultBeatVersion = "8.4.0" +const defaultBeatVersion = "8.5.0" From 51e5b0a1385e1b71dc30d37aebe085782ccfd49b Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Wed, 27 Jul 2022 16:12:49 -0400 Subject: [PATCH 067/180] [Automation] Update go release version to 1.17.12 (#726) Co-authored-by: apmmachine --- .go-version | 2 +- Dockerfile | 2 +- version/docs/version.asciidoc | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.go-version b/.go-version index ada2e4fce87..4512502b629 100644 --- a/.go-version +++ b/.go-version @@ -1 +1 @@ -1.17.10 +1.17.12 diff --git a/Dockerfile b/Dockerfile index 709dcbc7bef..6125555c8c0 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -ARG GO_VERSION=1.17.10 +ARG GO_VERSION=1.17.12 FROM circleci/golang:${GO_VERSION} diff --git a/version/docs/version.asciidoc 
b/version/docs/version.asciidoc index 9d0056a0c38..e33eb13d229 100644 --- a/version/docs/version.asciidoc +++ b/version/docs/version.asciidoc @@ -1,6 +1,6 @@ :stack-version: 8.3.0 :doc-branch: main -:go-version: 1.17.10 +:go-version: 1.17.12 :release-state: unreleased :python: 3.7 :docker: 1.12 From 817041191cb77f12f0ba5b54f670d07576ff1cdf Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Thu, 28 Jul 2022 01:42:46 -0400 Subject: [PATCH 068/180] [Automation] Update elastic stack version to 8.4.0-60171339 for testing (#799) Co-authored-by: apmmachine --- testing/environments/snapshot.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/testing/environments/snapshot.yml b/testing/environments/snapshot.yml index b8b6792b912..3b747702258 100644 --- a/testing/environments/snapshot.yml +++ b/testing/environments/snapshot.yml @@ -3,7 +3,7 @@ version: '2.3' services: elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:8.4.0-d058e92f-SNAPSHOT + image: docker.elastic.co/elasticsearch/elasticsearch:8.4.0-60171339-SNAPSHOT # When extend is used it merges healthcheck.tests, see: # https://github.com/docker/compose/issues/8962 # healthcheck: @@ -42,7 +42,7 @@ services: - ./docker/logstash/pki:/etc/pki:ro kibana: - image: docker.elastic.co/kibana/kibana:8.4.0-d058e92f-SNAPSHOT + image: docker.elastic.co/kibana/kibana:8.4.0-60171339-SNAPSHOT environment: - "ELASTICSEARCH_USERNAME=kibana_system_user" - "ELASTICSEARCH_PASSWORD=testing" From 7a88635a793b3e2ae5301fe9fd5463fb1261c01f Mon Sep 17 00:00:00 2001 From: Florian Lehner Date: Fri, 29 Jul 2022 15:08:05 +0200 Subject: [PATCH 069/180] update dependency elastic/go-structform from v0.0.9 to v0.0.10 (#802) Signed-off-by: Florian Lehner --- NOTICE.txt | 4 ++-- go.mod | 2 +- go.sum | 3 ++- 3 files changed, 5 insertions(+), 4 deletions(-) diff --git a/NOTICE.txt b/NOTICE.txt index 8b5c887cbe4..3949233c361 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -8227,11 +8227,11 @@ Contents of probable licence file $GOMODCACHE/github.com/docker/go-connections@v -------------------------------------------------------------------------------- Dependency : github.com/elastic/go-structform -Version: v0.0.9 +Version: v0.0.10 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/elastic/go-structform@v0.0.9/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/elastic/go-structform@v0.0.10/LICENSE: Apache License Version 2.0, January 2004 diff --git a/go.mod b/go.mod index 95044f69abc..dd1d29345e3 100644 --- a/go.mod +++ b/go.mod @@ -72,7 +72,7 @@ require ( github.com/docker/distribution v2.8.1+incompatible // indirect github.com/docker/docker v20.10.12+incompatible // indirect github.com/docker/go-connections v0.4.0 // indirect - github.com/elastic/go-structform v0.0.9 // indirect + github.com/elastic/go-structform v0.0.10 // indirect github.com/elastic/go-windows v1.0.1 // indirect github.com/elastic/gosigar v0.14.2 // indirect github.com/evanphx/json-patch v4.12.0+incompatible // indirect diff --git a/go.sum b/go.sum index 5026651a90b..4b63a456695 100644 --- a/go.sum +++ b/go.sum @@ -397,8 +397,9 @@ github.com/elastic/go-elasticsearch/v8 v8.0.0-20210317102009-a9d74cec0186/go.mod github.com/elastic/go-licenser v0.3.1/go.mod 
h1:D8eNQk70FOCVBl3smCGQt/lv7meBeQno2eI1S5apiHQ= github.com/elastic/go-licenser v0.4.0 h1:jLq6A5SilDS/Iz1ABRkO6BHy91B9jBora8FwGRsDqUI= github.com/elastic/go-licenser v0.4.0/go.mod h1:V56wHMpmdURfibNBggaSBfqgPxyT1Tldns1i87iTEvU= -github.com/elastic/go-structform v0.0.9 h1:HpcS7xljL4kSyUfDJ8cXTJC6rU5ChL1wYb6cx3HLD+o= github.com/elastic/go-structform v0.0.9/go.mod h1:CZWf9aIRYY5SuKSmOhtXScE5uQiLZNqAFnwKR4OrIM4= +github.com/elastic/go-structform v0.0.10 h1:oy08o/Ih2hHTkNcRY/1HhaYvIp5z6t8si8gnCJPDo1w= +github.com/elastic/go-structform v0.0.10/go.mod h1:CZWf9aIRYY5SuKSmOhtXScE5uQiLZNqAFnwKR4OrIM4= github.com/elastic/go-sysinfo v1.1.1/go.mod h1:i1ZYdU10oLNfRzq4vq62BEwD2fH8KaWh6eh0ikPT9F0= github.com/elastic/go-sysinfo v1.7.1 h1:Wx4DSARcKLllpKT2TnFVdSUJOsybqMYCNQZq1/wO+s0= github.com/elastic/go-sysinfo v1.7.1/go.mod h1:i1ZYdU10oLNfRzq4vq62BEwD2fH8KaWh6eh0ikPT9F0= From 74ce2ba8f7643280e2d5441018e44c10c93dd4e9 Mon Sep 17 00:00:00 2001 From: Michal Pristas Date: Mon, 1 Aug 2022 08:50:04 +0200 Subject: [PATCH 070/180] Fix unpacking of artifact config (#776) Fix unpacking of artifact config (#776) --- internal/pkg/agent/application/local_mode.go | 4 +- internal/pkg/artifact/config.go | 84 +++++- internal/pkg/artifact/config_test.go | 247 ++++++++++++++++++ .../artifact/download/snapshot/downloader.go | 13 +- 4 files changed, 337 insertions(+), 11 deletions(-) create mode 100644 internal/pkg/artifact/config_test.go diff --git a/internal/pkg/agent/application/local_mode.go b/internal/pkg/agent/application/local_mode.go index 29f311fe582..f06949bcba1 100644 --- a/internal/pkg/agent/application/local_mode.go +++ b/internal/pkg/agent/application/local_mode.go @@ -22,6 +22,7 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/agent/configuration" "github.com/elastic/elastic-agent/internal/pkg/agent/errors" "github.com/elastic/elastic-agent/internal/pkg/agent/operation" + "github.com/elastic/elastic-agent/internal/pkg/artifact" "github.com/elastic/elastic-agent/internal/pkg/capabilities" "github.com/elastic/elastic-agent/internal/pkg/composable" "github.com/elastic/elastic-agent/internal/pkg/config" @@ -131,6 +132,7 @@ func newLocal( }, caps, monitor, + artifact.NewReloader(cfg.Settings.DownloadConfig, log), ) if err != nil { return nil, err @@ -203,7 +205,7 @@ func (l *Local) AgentInfo() *info.AgentInfo { } func discoverer(patterns ...string) discoverFunc { - var p []string + p := make([]string, 0, len(patterns)) for _, newP := range patterns { if len(newP) == 0 { continue diff --git a/internal/pkg/artifact/config.go b/internal/pkg/artifact/config.go index fa57ca06870..d88031e5de5 100644 --- a/internal/pkg/artifact/config.go +++ b/internal/pkg/artifact/config.go @@ -9,6 +9,7 @@ import ( "strings" "time" + c "github.com/elastic/elastic-agent-libs/config" "github.com/elastic/elastic-agent-libs/transport/httpcommon" "github.com/elastic/elastic-agent/internal/pkg/agent/application/paths" "github.com/elastic/elastic-agent/internal/pkg/agent/errors" @@ -64,6 +65,42 @@ func NewReloader(cfg *Config, log *logger.Logger) *Reloader { } func (r *Reloader) Reload(rawConfig *config.Config) error { + if err := r.reloadConfig(rawConfig); err != nil { + return errors.New(err, "failed to reload config") + } + + if err := r.reloadSourceURI(rawConfig); err != nil { + return errors.New(err, "failed to reload source URI") + } + + return nil +} + +func (r *Reloader) 
reloadConfig(rawConfig *config.Config) error { + type reloadConfig struct { + C *Config `json:"agent.download" config:"agent.download"` + } + tmp := &reloadConfig{ + C: DefaultConfig(), + } + if err := rawConfig.Unpack(&tmp); err != nil { + return err + } + + *(r.cfg) = Config{ + OperatingSystem: tmp.C.OperatingSystem, + Architecture: tmp.C.Architecture, + SourceURI: tmp.C.SourceURI, + TargetDirectory: tmp.C.TargetDirectory, + InstallPath: tmp.C.InstallPath, + DropPath: tmp.C.DropPath, + HTTPTransportSettings: tmp.C.HTTPTransportSettings, + } + + return nil +} + +func (r *Reloader) reloadSourceURI(rawConfig *config.Config) error { type reloadConfig struct { // SourceURI: source of the artifacts, e.g https://artifacts.elastic.co/downloads/ SourceURI string `json:"agent.download.sourceURI" config:"agent.download.sourceURI"` @@ -78,11 +115,11 @@ func (r *Reloader) Reload(rawConfig *config.Config) error { } var newSourceURI string - if cfg.FleetSourceURI != "" { + if fleetURI := strings.TrimSpace(cfg.FleetSourceURI); fleetURI != "" { // fleet configuration takes precedence - newSourceURI = cfg.FleetSourceURI - } else if cfg.SourceURI != "" { - newSourceURI = cfg.SourceURI + newSourceURI = fleetURI + } else if sourceURI := strings.TrimSpace(cfg.SourceURI); sourceURI != "" { + newSourceURI = sourceURI } if newSourceURI != "" { @@ -148,3 +185,42 @@ func (c *Config) Arch() string { c.Architecture = arch return c.Architecture } + +// Unpack reads a config object into the settings. +func (c *Config) Unpack(cfg *c.C) error { + tmp := struct { + OperatingSystem string `json:"-" config:",ignore"` + Architecture string `json:"-" config:",ignore"` + SourceURI string `json:"sourceURI" config:"sourceURI"` + TargetDirectory string `json:"targetDirectory" config:"target_directory"` + InstallPath string `yaml:"installPath" config:"install_path"` + DropPath string `yaml:"dropPath" config:"drop_path"` + }{ + OperatingSystem: c.OperatingSystem, + Architecture: c.Architecture, + SourceURI: c.SourceURI, + TargetDirectory: c.TargetDirectory, + InstallPath: c.InstallPath, + DropPath: c.DropPath, + } + + if err := cfg.Unpack(&tmp); err != nil { + return err + } + + transport := DefaultConfig().HTTPTransportSettings + if err := cfg.Unpack(&transport); err != nil { + return err + } + + *c = Config{ + OperatingSystem: tmp.OperatingSystem, + Architecture: tmp.Architecture, + SourceURI: tmp.SourceURI, + TargetDirectory: tmp.TargetDirectory, + InstallPath: tmp.InstallPath, + DropPath: tmp.DropPath, + HTTPTransportSettings: transport, + } + return nil +} diff --git a/internal/pkg/artifact/config_test.go b/internal/pkg/artifact/config_test.go new file mode 100644 index 00000000000..3a9a694b757 --- /dev/null +++ b/internal/pkg/artifact/config_test.go @@ -0,0 +1,247 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+ +package artifact + +import ( + "testing" + "time" + + "github.com/elastic/elastic-agent/internal/pkg/config" + "github.com/elastic/elastic-agent/pkg/core/logger" + "github.com/stretchr/testify/require" +) + +func TestReload(t *testing.T) { + type testCase struct { + input string + initialConfig *Config + expectedSourceURI string + expectedTargetDirectory string + expectedInstallDirectory string + expectedDropDirectory string + expectedFingerprint string + expectedTLS bool + expectedTLSEnabled bool + expectedDisableProxy bool + expectedTimeout time.Duration + } + defaultValues := DefaultConfig() + testCases := []testCase{ + { + input: `agent.download: + sourceURI: "testing.uri" + target_directory: "a/b/c" + install_path: "i/p" + drop_path: "d/p" + proxy_disable: true + timeout: 33s + ssl.enabled: true + ssl.ca_trusted_fingerprint: "my_finger_print" +`, + initialConfig: DefaultConfig(), + expectedSourceURI: "testing.uri", + expectedTargetDirectory: "a/b/c", + expectedInstallDirectory: "i/p", + expectedDropDirectory: "d/p", + expectedFingerprint: "my_finger_print", + expectedTLS: true, + expectedTLSEnabled: true, + expectedDisableProxy: true, + expectedTimeout: 33 * time.Second, + }, + { + input: `agent.download: + sourceURI: "testing.uri" +`, + initialConfig: DefaultConfig(), + expectedSourceURI: "testing.uri", + expectedTargetDirectory: defaultValues.TargetDirectory, + expectedInstallDirectory: defaultValues.InstallPath, + expectedDropDirectory: defaultValues.DropPath, + expectedFingerprint: "", + expectedTLS: defaultValues.TLS != nil, + expectedTLSEnabled: false, + expectedDisableProxy: defaultValues.Proxy.Disable, + expectedTimeout: defaultValues.Timeout, + }, + { + input: `agent.download: + sourceURI: "" +`, + initialConfig: &Config{ + SourceURI: "testing.uri", + HTTPTransportSettings: defaultValues.HTTPTransportSettings, + }, + expectedSourceURI: defaultValues.SourceURI, // fallback to default when set to empty + expectedTargetDirectory: defaultValues.TargetDirectory, + expectedInstallDirectory: defaultValues.InstallPath, + expectedDropDirectory: defaultValues.DropPath, + expectedFingerprint: "", + expectedTLS: defaultValues.TLS != nil, + expectedTLSEnabled: false, + expectedDisableProxy: defaultValues.Proxy.Disable, + expectedTimeout: defaultValues.Timeout, + }, + { + input: ``, + initialConfig: &Config{ + SourceURI: "testing.uri", + HTTPTransportSettings: defaultValues.HTTPTransportSettings, + }, + expectedSourceURI: defaultValues.SourceURI, // fallback to default when not set + expectedTargetDirectory: defaultValues.TargetDirectory, + expectedInstallDirectory: defaultValues.InstallPath, + expectedDropDirectory: defaultValues.DropPath, + expectedFingerprint: "", + expectedTLS: defaultValues.TLS != nil, + expectedTLSEnabled: false, + expectedDisableProxy: defaultValues.Proxy.Disable, + expectedTimeout: defaultValues.Timeout, + }, + { + input: `agent.download: + sourceURI: " " +`, + initialConfig: &Config{ + SourceURI: "testing.uri", + HTTPTransportSettings: defaultValues.HTTPTransportSettings, + }, + expectedSourceURI: defaultValues.SourceURI, // fallback to default when set to whitespace + expectedTargetDirectory: defaultValues.TargetDirectory, + expectedInstallDirectory: defaultValues.InstallPath, + expectedDropDirectory: defaultValues.DropPath, + expectedFingerprint: "", + expectedTLS: defaultValues.TLS != nil, + expectedTLSEnabled: false, + expectedDisableProxy: defaultValues.Proxy.Disable, + expectedTimeout: defaultValues.Timeout, + }, + { + input: 
`agent.download: + source_uri: " " +`, + initialConfig: &Config{ + SourceURI: "testing.uri", + HTTPTransportSettings: defaultValues.HTTPTransportSettings, + }, + expectedSourceURI: defaultValues.SourceURI, // fallback to default when set to whitespace + expectedTargetDirectory: defaultValues.TargetDirectory, + expectedInstallDirectory: defaultValues.InstallPath, + expectedDropDirectory: defaultValues.DropPath, + expectedFingerprint: "", + expectedTLS: defaultValues.TLS != nil, + expectedTLSEnabled: false, + expectedDisableProxy: defaultValues.Proxy.Disable, + expectedTimeout: defaultValues.Timeout, + }, + { + input: `agent.download: + source_uri: " " + sourceURI: " " +`, + initialConfig: DefaultConfig(), + expectedSourceURI: defaultValues.SourceURI, // fallback to default when set to whitespace + expectedTargetDirectory: defaultValues.TargetDirectory, + expectedInstallDirectory: defaultValues.InstallPath, + expectedDropDirectory: defaultValues.DropPath, + expectedFingerprint: "", + expectedTLS: defaultValues.TLS != nil, + expectedTLSEnabled: false, + expectedDisableProxy: defaultValues.Proxy.Disable, + expectedTimeout: defaultValues.Timeout, + }, + { + input: ``, + initialConfig: &Config{ + SourceURI: "testing.uri", + HTTPTransportSettings: defaultValues.HTTPTransportSettings, + }, + expectedSourceURI: defaultValues.SourceURI, + expectedTargetDirectory: defaultValues.TargetDirectory, + expectedInstallDirectory: defaultValues.InstallPath, + expectedDropDirectory: defaultValues.DropPath, + expectedFingerprint: "", + expectedTLS: defaultValues.TLS != nil, + expectedTLSEnabled: false, + expectedDisableProxy: defaultValues.Proxy.Disable, + expectedTimeout: defaultValues.Timeout, + }, + { + input: `agent.download: + source_uri: " " + sourceURI: "testing.uri" +`, + initialConfig: DefaultConfig(), + expectedSourceURI: "testing.uri", + expectedTargetDirectory: defaultValues.TargetDirectory, + expectedInstallDirectory: defaultValues.InstallPath, + expectedDropDirectory: defaultValues.DropPath, + expectedFingerprint: "", + expectedTLS: defaultValues.TLS != nil, + expectedTLSEnabled: false, + expectedDisableProxy: defaultValues.Proxy.Disable, + expectedTimeout: defaultValues.Timeout, + }, + { + input: `agent.download: + source_uri: "testing.uri" + sourceURI: " " +`, + initialConfig: DefaultConfig(), + expectedSourceURI: "testing.uri", + expectedTargetDirectory: defaultValues.TargetDirectory, + expectedInstallDirectory: defaultValues.InstallPath, + expectedDropDirectory: defaultValues.DropPath, + expectedFingerprint: "", + expectedTLS: defaultValues.TLS != nil, + expectedTLSEnabled: false, + expectedDisableProxy: defaultValues.Proxy.Disable, + expectedTimeout: defaultValues.Timeout, + }, + { + input: `agent.download: + source_uri: "testing.uri" + sourceURI: "another.uri" +`, + initialConfig: DefaultConfig(), + expectedSourceURI: "testing.uri", + expectedTargetDirectory: defaultValues.TargetDirectory, + expectedInstallDirectory: defaultValues.InstallPath, + expectedDropDirectory: defaultValues.DropPath, + expectedFingerprint: "", + expectedTLS: defaultValues.TLS != nil, + expectedTLSEnabled: false, + expectedDisableProxy: defaultValues.Proxy.Disable, + expectedTimeout: defaultValues.Timeout, + }, + } + + l, _ := logger.NewTesting("t") + for _, tc := range testCases { + cfg := tc.initialConfig + reloader := NewReloader(cfg, l) + + c, err := config.NewConfigFrom(tc.input) + require.NoError(t, err) + + require.NoError(t, reloader.Reload(c)) + + require.Equal(t, tc.expectedSourceURI, cfg.SourceURI) + 
require.Equal(t, tc.expectedTargetDirectory, cfg.TargetDirectory) + require.Equal(t, tc.expectedInstallDirectory, cfg.InstallPath) + require.Equal(t, tc.expectedDropDirectory, cfg.DropPath) + require.Equal(t, tc.expectedTimeout, cfg.Timeout) + + require.Equal(t, tc.expectedDisableProxy, cfg.Proxy.Disable) + + if tc.expectedTLS { + require.NotNil(t, cfg.TLS) + require.Equal(t, tc.expectedTLSEnabled, *cfg.TLS.Enabled) + require.Equal(t, tc.expectedFingerprint, cfg.TLS.CATrustedFingerprint) + } else { + require.Nil(t, cfg.TLS) + } + } +} diff --git a/internal/pkg/artifact/download/snapshot/downloader.go b/internal/pkg/artifact/download/snapshot/downloader.go index 2fbe027ae4b..c3680147927 100644 --- a/internal/pkg/artifact/download/snapshot/downloader.go +++ b/internal/pkg/artifact/download/snapshot/downloader.go @@ -34,12 +34,13 @@ func snapshotConfig(config *artifact.Config, versionOverride string) (*artifact. } return &artifact.Config{ - OperatingSystem: config.OperatingSystem, - Architecture: config.Architecture, - SourceURI: snapshotURI, - TargetDirectory: config.TargetDirectory, - InstallPath: config.InstallPath, - DropPath: config.DropPath, + OperatingSystem: config.OperatingSystem, + Architecture: config.Architecture, + SourceURI: snapshotURI, + TargetDirectory: config.TargetDirectory, + InstallPath: config.InstallPath, + DropPath: config.DropPath, + HTTPTransportSettings: config.HTTPTransportSettings, }, nil } From 32afc73e2ead09d2c2b71850af603f173749c082 Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Tue, 2 Aug 2022 01:35:50 -0400 Subject: [PATCH 071/180] [Automation] Update elastic stack version to 8.5.0-c54c3404 for testing (#826) Co-authored-by: apmmachine --- testing/environments/snapshot.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/testing/environments/snapshot.yml b/testing/environments/snapshot.yml index 3b747702258..6450c3c6639 100644 --- a/testing/environments/snapshot.yml +++ b/testing/environments/snapshot.yml @@ -3,7 +3,7 @@ version: '2.3' services: elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:8.4.0-60171339-SNAPSHOT + image: docker.elastic.co/elasticsearch/elasticsearch:8.5.0-c54c3404-SNAPSHOT # When extend is used it merges healthcheck.tests, see: # https://github.com/docker/compose/issues/8962 # healthcheck: @@ -42,7 +42,7 @@ services: - ./docker/logstash/pki:/etc/pki:ro kibana: - image: docker.elastic.co/kibana/kibana:8.4.0-60171339-SNAPSHOT + image: docker.elastic.co/kibana/kibana:8.5.0-c54c3404-SNAPSHOT environment: - "ELASTICSEARCH_USERNAME=kibana_system_user" - "ELASTICSEARCH_PASSWORD=testing" From bea990fbbf785a6c9eb998b54822f2b71d657482 Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Tue, 2 Aug 2022 05:51:16 -0400 Subject: [PATCH 072/180] [Automation] Update elastic stack version to 8.5.0-7dbc10f8 for testing (#833) Co-authored-by: apmmachine --- testing/environments/snapshot.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/testing/environments/snapshot.yml b/testing/environments/snapshot.yml index 6450c3c6639..8d629d8e77b 100644 --- a/testing/environments/snapshot.yml +++ b/testing/environments/snapshot.yml @@ -3,7 +3,7 @@ version: '2.3' services: elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:8.5.0-c54c3404-SNAPSHOT + image: docker.elastic.co/elasticsearch/elasticsearch:8.5.0-7dbc10f8-SNAPSHOT # When extend is used it merges healthcheck.tests, see: # 
https://github.com/docker/compose/issues/8962 # healthcheck: @@ -42,7 +42,7 @@ services: - ./docker/logstash/pki:/etc/pki:ro kibana: - image: docker.elastic.co/kibana/kibana:8.5.0-c54c3404-SNAPSHOT + image: docker.elastic.co/kibana/kibana:8.5.0-7dbc10f8-SNAPSHOT environment: - "ELASTICSEARCH_USERNAME=kibana_system_user" - "ELASTICSEARCH_PASSWORD=testing" From 12c7d15debf038183b274d0092859cdd281fd50d Mon Sep 17 00:00:00 2001 From: Aleksandr Maus Date: Tue, 2 Aug 2022 13:23:11 -0400 Subject: [PATCH 073/180] Fix RPM/DEB clean install (#816) * Fix RPM/DEB clean install * Improve the post install script * Do not try to copy the state files if the agent directory is the same, as this causes an error. * Check the existence of the symlink instead of the file it points to for the state file migration. * Update the check for symlink existence for the cases where the symlink points to a non-existent file --- .../templates/linux/postinstall.sh.tmpl | 38 ++++++++++++------- 1 file changed, 25 insertions(+), 13 deletions(-) diff --git a/dev-tools/packaging/templates/linux/postinstall.sh.tmpl b/dev-tools/packaging/templates/linux/postinstall.sh.tmpl index 083ebb91060..d96f21a8629 100644 --- a/dev-tools/packaging/templates/linux/postinstall.sh.tmpl +++ b/dev-tools/packaging/templates/linux/postinstall.sh.tmpl @@ -3,16 +3,26 @@ set -e symlink="/usr/share/elastic-agent/bin/elastic-agent" -old_agent_dir="$( dirname "$(readlink -f -- "$symlink")" )" +old_agent_dir="" + +# check if $symlink exists for the previous install +# and derive the old agent directory +if test -L "$symlink"; then + resolved_symlink="$(readlink -f -- "$symlink")" + # check if it is resolved to a non-empty string + if ! [ -z "$resolved_symlink" ]; then + old_agent_dir="$( dirname "$resolved_symlink" )" + fi +fi commit_hash="{{ commit_short }}" -yml_path="$old_agent_dir/state.yml" -enc_path="$old_agent_dir/state.enc" +new_agent_dir="/var/lib/elastic-agent/data/elastic-agent-$commit_hash" -new_agent_dir="$( dirname "$old_agent_dir")/elastic-agent-$commit_hash" - -if ! [[ "$old_agent_dir" -ef "$new_agent_dir" ]]; then +# copy the state files if there was a previous agent install +if ! [ -z "$old_agent_dir" ] && ! [ "$old_agent_dir" -ef "$new_agent_dir" ]; then + yml_path="$old_agent_dir/state.yml" + enc_path="$old_agent_dir/state.enc" echo "migrate state from $old_agent_dir to $new_agent_dir" if test -f "$yml_path"; then @@ -24,15 +34,17 @@ if ! [[ "$old_agent_dir" -ef "$new_agent_dir" ]]; then echo "found "$enc_path", copy to "$new_agent_dir"."
cp "$enc_path" "$new_agent_dir" fi +fi - if test -f "$symlink"; then - echo "found symlink $symlink, unlink" - unlink "$symlink" - fi - - echo "create symlink "$symlink" to "$new_agent_dir/elastic-agent"" - ln -s "$new_agent_dir/elastic-agent" "$symlink" +# delete symlink if exists +if test -L "$symlink"; then + echo "found symlink $symlink, unlink" + unlink "$symlink" fi +# create symlink to the new agent +echo "create symlink "$symlink" to "$new_agent_dir/elastic-agent"" +ln -s "$new_agent_dir/elastic-agent" "$symlink" + systemctl daemon-reload 2> /dev/null exit 0 From ff8de859f9bbef638e35236b8f8e98d5db2dcc0e Mon Sep 17 00:00:00 2001 From: Florian Lehner Date: Thu, 4 Aug 2022 19:13:48 +0200 Subject: [PATCH 074/180] fix path for auto generated spec file (#859) Signed-off-by: Florian Lehner --- dev-tools/cmd/buildspec/buildspec.go | 2 +- internal/pkg/agent/program/supported.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/dev-tools/cmd/buildspec/buildspec.go b/dev-tools/cmd/buildspec/buildspec.go index ea16fbe6968..60d2309ce08 100644 --- a/dev-tools/cmd/buildspec/buildspec.go +++ b/dev-tools/cmd/buildspec/buildspec.go @@ -34,7 +34,7 @@ func init() { var tmpl = template.Must(template.New("specs").Parse(` {{ .License }} -// Code generated by elastic-agent/internals/dev-tools/buildspec/buildspec.go - DO NOT EDIT. +// Code generated by elastic-agent/dev-tools/cmd/buildspec/buildspec.go - DO NOT EDIT. package program diff --git a/internal/pkg/agent/program/supported.go b/internal/pkg/agent/program/supported.go index 985ebb03709..c6d78b20f84 100644 --- a/internal/pkg/agent/program/supported.go +++ b/internal/pkg/agent/program/supported.go @@ -2,7 +2,7 @@ // or more contributor license agreements. Licensed under the Elastic License; // you may not use this file except in compliance with the Elastic License. -// Code generated by elastic-agent/internals/dev-tools/buildspec/buildspec.go - DO NOT EDIT. +// Code generated by elastic-agent/dev-tools/cmd/buildspec/buildspec.go - DO NOT EDIT. 
package program From 6d830e88d5c3e434b7ca082e45d7fb32dda78159 Mon Sep 17 00:00:00 2001 From: Michal Pristas Date: Fri, 5 Aug 2022 08:37:40 +0200 Subject: [PATCH 075/180] Reload downloader client on config change (#848) Reload downloader client on config change (#848) --- internal/pkg/agent/application/local_mode.go | 6 +++ .../pkg/agent/application/managed_mode.go | 6 +++ .../pipeline/emitter/controller.go | 6 +-- .../application/pipeline/emitter/emitter.go | 2 +- .../application/pipeline/router/router.go | 23 +++++++++ .../pipeline/stream/operator_stream.go | 11 +++++ internal/pkg/agent/operation/operator.go | 49 +++++++++++++++++-- internal/pkg/artifact/config.go | 22 +++++++-- .../artifact/download/composed/downloader.go | 16 ++++++ .../artifact/download/composed/verifier.go | 18 ++++++- .../pkg/artifact/download/http/downloader.go | 31 ++++++++++-- .../pkg/artifact/download/http/verifier.go | 18 +++++++ internal/pkg/artifact/download/reloadable.go | 14 ++++++ .../artifact/download/snapshot/downloader.go | 39 ++++++++++++++- .../artifact/download/snapshot/verifier.go | 37 +++++++++++++- 15 files changed, 276 insertions(+), 22 deletions(-) create mode 100644 internal/pkg/artifact/download/reloadable.go diff --git a/internal/pkg/agent/application/local_mode.go b/internal/pkg/agent/application/local_mode.go index f06949bcba1..e6496b44860 100644 --- a/internal/pkg/agent/application/local_mode.go +++ b/internal/pkg/agent/application/local_mode.go @@ -119,6 +119,11 @@ func newLocal( return nil, errors.New(err, "failed to initialize composable controller") } + routerArtifactReloader, ok := router.(emitter.Reloader) + if !ok { + return nil, errors.New("router not capable of artifact reload") // Needed for client reloading + } + discover := discoverer(pathConfigFile, cfg.Settings.Path, externalConfigsGlob()) emit, err := emitter.New( localApplication.bgContext, @@ -133,6 +138,7 @@ func newLocal( caps, monitor, artifact.NewReloader(cfg.Settings.DownloadConfig, log), + routerArtifactReloader, ) if err != nil { return nil, err diff --git a/internal/pkg/agent/application/managed_mode.go b/internal/pkg/agent/application/managed_mode.go index 3f98e78fd62..08c43aeeca3 100644 --- a/internal/pkg/agent/application/managed_mode.go +++ b/internal/pkg/agent/application/managed_mode.go @@ -146,6 +146,11 @@ func newManaged( return nil, errors.New(err, "failed to initialize composable controller") } + routerArtifactReloader, ok := router.(emitter.Reloader) + if !ok { + return nil, errors.New("router not capable of artifact reload") // Needed for client reloading + } + emit, err := emitter.New( managedApplication.bgContext, log, @@ -159,6 +164,7 @@ func newManaged( caps, monitor, artifact.NewReloader(cfg.Settings.DownloadConfig, log), + routerArtifactReloader, ) if err != nil { return nil, err diff --git a/internal/pkg/agent/application/pipeline/emitter/controller.go b/internal/pkg/agent/application/pipeline/emitter/controller.go index 7f83961586c..1085046ea64 100644 --- a/internal/pkg/agent/application/pipeline/emitter/controller.go +++ b/internal/pkg/agent/application/pipeline/emitter/controller.go @@ -21,7 +21,7 @@ import ( "github.com/elastic/elastic-agent/pkg/core/logger" ) -type reloadable interface { +type Reloader interface { Reload(cfg *config.Config) error } @@ -32,7 +32,7 @@ type Controller struct { controller composable.Controller router pipeline.Router modifiers *pipeline.ConfigModifiers - reloadables []reloadable + reloadables []Reloader caps capabilities.Capability // state @@ -51,7 +51,7 
@@ func NewController( router pipeline.Router, modifiers *pipeline.ConfigModifiers, caps capabilities.Capability, - reloadables ...reloadable, + reloadables ...Reloader, ) *Controller { init, _ := transpiler.NewVars(map[string]interface{}{}, nil) diff --git a/internal/pkg/agent/application/pipeline/emitter/emitter.go b/internal/pkg/agent/application/pipeline/emitter/emitter.go index 7855fb51602..4a42c99d620 100644 --- a/internal/pkg/agent/application/pipeline/emitter/emitter.go +++ b/internal/pkg/agent/application/pipeline/emitter/emitter.go @@ -22,7 +22,7 @@ import ( ) // New creates a new emitter function. -func New(ctx context.Context, log *logger.Logger, agentInfo *info.AgentInfo, controller composable.Controller, router pipeline.Router, modifiers *pipeline.ConfigModifiers, caps capabilities.Capability, reloadables ...reloadable) (pipeline.EmitterFunc, error) { +func New(ctx context.Context, log *logger.Logger, agentInfo *info.AgentInfo, controller composable.Controller, router pipeline.Router, modifiers *pipeline.ConfigModifiers, caps capabilities.Capability, reloadables ...Reloader) (pipeline.EmitterFunc, error) { log.Debugf("Supported programs: %s", strings.Join(program.KnownProgramNames(), ", ")) ctrl := NewController(log, agentInfo, controller, router, modifiers, caps, reloadables...) diff --git a/internal/pkg/agent/application/pipeline/router/router.go b/internal/pkg/agent/application/pipeline/router/router.go index e1f1d63c8b5..274089bbc60 100644 --- a/internal/pkg/agent/application/pipeline/router/router.go +++ b/internal/pkg/agent/application/pipeline/router/router.go @@ -11,8 +11,10 @@ import ( "time" "github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline/emitter" "github.com/elastic/elastic-agent/internal/pkg/agent/configrequest" "github.com/elastic/elastic-agent/internal/pkg/agent/program" + "github.com/elastic/elastic-agent/internal/pkg/config" "github.com/elastic/elastic-agent/internal/pkg/sorted" "github.com/elastic/elastic-agent/pkg/core/logger" ) @@ -35,6 +37,27 @@ func New(log *logger.Logger, factory pipeline.StreamFunc) (pipeline.Router, erro return &router{log: log, streamFactory: factory, routes: sorted.NewSet()}, nil } +func (r *router) Reload(c *config.Config) error { + keys := r.routes.Keys() + for _, key := range keys { + route, found := r.routes.Get(key) + if !found { + continue + } + + routeReloader, ok := route.(emitter.Reloader) + if !ok { + continue + } + + if err := routeReloader.Reload(c); err != nil { + return err + } + } + + return nil +} + func (r *router) Routes() *sorted.Set { return r.routes } diff --git a/internal/pkg/agent/application/pipeline/stream/operator_stream.go b/internal/pkg/agent/application/pipeline/stream/operator_stream.go index ee4ee44079e..9216e12fe82 100644 --- a/internal/pkg/agent/application/pipeline/stream/operator_stream.go +++ b/internal/pkg/agent/application/pipeline/stream/operator_stream.go @@ -10,8 +10,10 @@ import ( "go.elastic.co/apm" "github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline/emitter" "github.com/elastic/elastic-agent/internal/pkg/agent/configrequest" "github.com/elastic/elastic-agent/internal/pkg/agent/program" + "github.com/elastic/elastic-agent/internal/pkg/config" 
"github.com/elastic/elastic-agent/internal/pkg/core/state" "github.com/elastic/elastic-agent/pkg/core/logger" ) @@ -29,6 +31,15 @@ type specer interface { Specs() map[string]program.Spec } +func (b *operatorStream) Reload(c *config.Config) error { + r, ok := b.configHandler.(emitter.Reloader) + if !ok { + return nil + } + + return r.Reload(c) +} + func (b *operatorStream) Close() error { return b.configHandler.Close() } diff --git a/internal/pkg/agent/operation/operator.go b/internal/pkg/agent/operation/operator.go index 71cb6569671..ed28b7cb633 100644 --- a/internal/pkg/agent/operation/operator.go +++ b/internal/pkg/agent/operation/operator.go @@ -20,9 +20,11 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/agent/errors" "github.com/elastic/elastic-agent/internal/pkg/agent/program" "github.com/elastic/elastic-agent/internal/pkg/agent/stateresolver" + "github.com/elastic/elastic-agent/internal/pkg/artifact" "github.com/elastic/elastic-agent/internal/pkg/artifact/download" "github.com/elastic/elastic-agent/internal/pkg/artifact/install" "github.com/elastic/elastic-agent/internal/pkg/artifact/uninstall" + "github.com/elastic/elastic-agent/internal/pkg/config" "github.com/elastic/elastic-agent/internal/pkg/core/app" "github.com/elastic/elastic-agent/internal/pkg/core/monitoring" "github.com/elastic/elastic-agent/internal/pkg/core/monitoring/noop" @@ -115,12 +117,51 @@ func NewOperator( operator.initHandlerMap() - os.MkdirAll(config.DownloadConfig.TargetDirectory, 0755) - os.MkdirAll(config.DownloadConfig.InstallPath, 0755) + if err := os.MkdirAll(config.DownloadConfig.TargetDirectory, 0755); err != nil { + // can already exists from previous runs, not an error + logger.Warnf("failed creating %q: %v", config.DownloadConfig.TargetDirectory, err) + } + if err := os.MkdirAll(config.DownloadConfig.InstallPath, 0755); err != nil { + // can already exists from previous runs, not an error + logger.Warnf("failed creating %q: %v", config.DownloadConfig.InstallPath, err) + } return operator, nil } +func (o *Operator) Reload(rawConfig *config.Config) error { + // save some unpacking in downloaders + type reloadConfig struct { + C *artifact.Config `json:"agent.download" config:"agent.download"` + } + tmp := &reloadConfig{ + C: artifact.DefaultConfig(), + } + if err := rawConfig.Unpack(&tmp); err != nil { + return errors.New(err, "failed to unpack artifact config") + } + + if err := o.reloadComponent(o.downloader, "downloader", tmp.C); err != nil { + return err + } + + return o.reloadComponent(o.verifier, "verifier", tmp.C) +} + +func (o *Operator) reloadComponent(component interface{}, name string, cfg *artifact.Config) error { + r, ok := component.(artifact.ConfigReloader) + if !ok { + o.logger.Debugf("failed reloading %q: component is not reloadable", name) + return nil // not an error, could be filesystem downloader/verifier + } + + if err := r.Reload(cfg); err != nil { + return errors.New(err, fmt.Sprintf("failed reloading %q config", component)) + } + + return nil +} + // State describes the current state of the system. // Reports all known applications and theirs states. Whether they are running // or not, and if they are information about process is also present. 
@@ -238,12 +279,12 @@ func (o *Operator) Shutdown() { a.Shutdown() wg.Done() o.logger.Debugf("took %s to shutdown %s", - time.Now().Sub(started), a.Name()) + time.Since(started), a.Name()) }(a) } wg.Wait() o.logger.Debugf("took %s to shutdown %d apps", - time.Now().Sub(started), len(o.apps)) + time.Since(started), len(o.apps)) } // Start starts a new process based on a configuration diff --git a/internal/pkg/artifact/config.go b/internal/pkg/artifact/config.go index d88031e5de5..76637c28d31 100644 --- a/internal/pkg/artifact/config.go +++ b/internal/pkg/artifact/config.go @@ -25,6 +25,10 @@ const ( defaultSourceURI = "https://artifacts.elastic.co/downloads/" ) +type ConfigReloader interface { + Reload(*Config) error +} + // Config is a configuration used for verifier and downloader type Config struct { // OperatingSystem: operating system [linux, windows, darwin] @@ -53,14 +57,16 @@ type Config struct { } type Reloader struct { - log *logger.Logger - cfg *Config + log *logger.Logger + cfg *Config + reloaders []ConfigReloader } -func NewReloader(cfg *Config, log *logger.Logger) *Reloader { +func NewReloader(cfg *Config, log *logger.Logger, rr ...ConfigReloader) *Reloader { return &Reloader{ - cfg: cfg, - log: log, + cfg: cfg, + log: log, + reloaders: rr, } } @@ -73,6 +79,12 @@ func (r *Reloader) Reload(rawConfig *config.Config) error { return errors.New(err, "failed to reload source URI") } + for _, reloader := range r.reloaders { + if err := reloader.Reload(r.cfg); err != nil { + return errors.New(err, "failed reloading config") + } + } + return nil } diff --git a/internal/pkg/artifact/download/composed/downloader.go b/internal/pkg/artifact/download/composed/downloader.go index 0b8504172f3..06c78fecdd6 100644 --- a/internal/pkg/artifact/download/composed/downloader.go +++ b/internal/pkg/artifact/download/composed/downloader.go @@ -10,7 +10,9 @@ import ( "github.com/hashicorp/go-multierror" "go.elastic.co/apm" + "github.com/elastic/elastic-agent/internal/pkg/agent/errors" "github.com/elastic/elastic-agent/internal/pkg/agent/program" + "github.com/elastic/elastic-agent/internal/pkg/artifact" "github.com/elastic/elastic-agent/internal/pkg/artifact/download" ) @@ -50,3 +52,17 @@ func (e *Downloader) Download(ctx context.Context, spec program.Spec, version st return "", err } + +func (e *Downloader) Reload(c *artifact.Config) error { + for _, d := range e.dd { + reloadable, ok := d.(download.Reloader) + if !ok { + continue + } + + if err := reloadable.Reload(c); err != nil { + return errors.New(err, "failed reloading artifact config for composed downloader") + } + } + return nil +} diff --git a/internal/pkg/artifact/download/composed/verifier.go b/internal/pkg/artifact/download/composed/verifier.go index 9fb60d20007..ec99dfa4b83 100644 --- a/internal/pkg/artifact/download/composed/verifier.go +++ b/internal/pkg/artifact/download/composed/verifier.go @@ -5,11 +5,11 @@ package composed import ( - "errors" - "github.com/hashicorp/go-multierror" + "github.com/elastic/elastic-agent/internal/pkg/agent/errors" "github.com/elastic/elastic-agent/internal/pkg/agent/program" + "github.com/elastic/elastic-agent/internal/pkg/artifact" "github.com/elastic/elastic-agent/internal/pkg/artifact/download" ) @@ -54,3 +54,17 @@ func (e *Verifier) Verify(spec program.Spec, version string) error { return err } + +func (e *Verifier) Reload(c *artifact.Config) error { + for _, v := range e.vv { + reloadable, ok := v.(download.Reloader) + if 
!ok { + continue + } + + if err := reloadable.Reload(c); err != nil { + return errors.New(err, "failed reloading artifact config for composed verifier") + } + } + return nil +} diff --git a/internal/pkg/artifact/download/http/downloader.go b/internal/pkg/artifact/download/http/downloader.go index 2da6e3d1015..9dba5783bc7 100644 --- a/internal/pkg/artifact/download/http/downloader.go +++ b/internal/pkg/artifact/download/http/downloader.go @@ -20,7 +20,6 @@ import ( "github.com/elastic/elastic-agent-libs/atomic" "github.com/elastic/elastic-agent-libs/transport/httpcommon" - "github.com/elastic/elastic-agent/internal/pkg/agent/errors" "github.com/elastic/elastic-agent/internal/pkg/agent/program" "github.com/elastic/elastic-agent/internal/pkg/artifact" @@ -73,6 +72,23 @@ func NewDownloaderWithClient(log progressLogger, config *artifact.Config, client } } +func (e *Downloader) Reload(c *artifact.Config) error { + // reload client + client, err := c.HTTPTransportSettings.Client( + httpcommon.WithAPMHTTPInstrumentation(), + ) + if err != nil { + return errors.New(err, "http.downloader: failed to generate client out of config") + } + + client.Transport = withHeaders(client.Transport, headers) + + e.client = *client + e.config = c + + return nil +} + // Download fetches the package from configured source. // Returns absolute path to downloaded package and an error. func (e *Downloader) Download(ctx context.Context, spec program.Spec, version string) (_ string, err error) { @@ -80,7 +96,9 @@ func (e *Downloader) Download(ctx context.Context, spec program.Spec, version st defer func() { if err != nil { for _, path := range downloadedFiles { - os.Remove(path) + if err := os.Remove(path); err != nil { + e.log.Warnf("failed to cleanup %s: %v", path, err) + } } } }() @@ -164,12 +182,14 @@ func (e *Downloader) downloadFile(ctx context.Context, artifactName, filename, f resp, err := e.client.Do(req.WithContext(ctx)) if err != nil { - return "", errors.New(err, "fetching package failed", errors.TypeNetwork, errors.M(errors.MetaKeyURI, sourceURI)) + // return path, file already exists and needs to be cleaned up + return fullPath, errors.New(err, "fetching package failed", errors.TypeNetwork, errors.M(errors.MetaKeyURI, sourceURI)) } defer resp.Body.Close() if resp.StatusCode != 200 { - return "", errors.New(fmt.Sprintf("call to '%s' returned unsuccessful status code: %d", sourceURI, resp.StatusCode), errors.TypeNetwork, errors.M(errors.MetaKeyURI, sourceURI)) + // return path, file already exists and needs to be cleaned up + return fullPath, errors.New(fmt.Sprintf("call to '%s' returned unsuccessful status code: %d", sourceURI, resp.StatusCode), errors.TypeNetwork, errors.M(errors.MetaKeyURI, sourceURI)) } fileSize := -1 @@ -186,7 +206,8 @@ func (e *Downloader) downloadFile(ctx context.Context, artifactName, filename, f if err != nil { reportCancel() dp.ReportFailed(err) - return "", errors.New(err, "fetching package failed", errors.TypeNetwork, errors.M(errors.MetaKeyURI, sourceURI)) + // return path, file already exists and needs to be cleaned up + return fullPath, errors.New(err, "copying fetched package failed", errors.TypeNetwork, errors.M(errors.MetaKeyURI, sourceURI)) } reportCancel() dp.ReportComplete() diff --git a/internal/pkg/artifact/download/http/verifier.go b/internal/pkg/artifact/download/http/verifier.go index 1fe855fa2af..4234ccae93a 100644 --- a/internal/pkg/artifact/download/http/verifier.go +++ b/internal/pkg/artifact/download/http/verifier.go @@ 
-60,6 +60,24 @@ func NewVerifier(config *artifact.Config, allowEmptyPgp bool, pgp []byte) (*Veri return v, nil } +func (v *Verifier) Reload(c *artifact.Config) error { + // reload client + client, err := c.HTTPTransportSettings.Client( + httpcommon.WithAPMHTTPInstrumentation(), + httpcommon.WithModRoundtripper(func(rt http.RoundTripper) http.RoundTripper { + return withHeaders(rt, headers) + }), + ) + if err != nil { + return errors.New(err, "http.verifier: failed to generate client out of config") + } + + v.client = *client + v.config = c + + return nil +} + // Verify checks downloaded package on preconfigured // location against a key stored on elastic.co website. func (v *Verifier) Verify(spec program.Spec, version string) error { diff --git a/internal/pkg/artifact/download/reloadable.go b/internal/pkg/artifact/download/reloadable.go new file mode 100644 index 00000000000..27845510316 --- /dev/null +++ b/internal/pkg/artifact/download/reloadable.go @@ -0,0 +1,14 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package download + +import ( + "github.com/elastic/elastic-agent/internal/pkg/artifact" +) + +// Reloader is an interface that allows reloading the artifact config +type Reloader interface { + Reload(*artifact.Config) error +} diff --git a/internal/pkg/artifact/download/snapshot/downloader.go b/internal/pkg/artifact/download/snapshot/downloader.go index c3680147927..619c95a4c5a 100644 --- a/internal/pkg/artifact/download/snapshot/downloader.go +++ b/internal/pkg/artifact/download/snapshot/downloader.go @@ -5,11 +5,14 @@ package snapshot import ( + "context" "encoding/json" "fmt" "strings" "github.com/elastic/elastic-agent-libs/transport/httpcommon" + "github.com/elastic/elastic-agent/internal/pkg/agent/errors" + "github.com/elastic/elastic-agent/internal/pkg/agent/program" "github.com/elastic/elastic-agent/internal/pkg/artifact" "github.com/elastic/elastic-agent/internal/pkg/artifact/download" "github.com/elastic/elastic-agent/internal/pkg/artifact/download/http" @@ -17,6 +20,11 @@ import ( "github.com/elastic/elastic-agent/pkg/core/logger" ) +type Downloader struct { + downloader download.Downloader + versionOverride string +} + // NewDownloader creates a downloader which first checks the local directory // and then falls back to remote if configured. func NewDownloader(log *logger.Logger, config *artifact.Config, versionOverride string) (download.Downloader, error) { @@ -24,7 +32,36 @@ func NewDownloader(log *logger.Logger, config *artifact.Config, versionOverride if err != nil { return nil, err } - return http.NewDownloader(log, cfg) + + httpDownloader, err := http.NewDownloader(log, cfg) + if err != nil { + return nil, errors.New(err, "failed to create snapshot downloader") + } + + return &Downloader{ + downloader: httpDownloader, + versionOverride: versionOverride, + }, nil +} + +func (e *Downloader) Reload(c *artifact.Config) error { + reloader, ok := e.downloader.(artifact.ConfigReloader) + if !ok { + return nil + } + + cfg, err := snapshotConfig(c, e.versionOverride) + if err != nil { + return errors.New(err, "snapshot.downloader: failed to generate snapshot config") + } + + return reloader.Reload(cfg) +} + +// Download fetches the package from configured source. +// Returns absolute path to downloaded package and an error.
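+// The snapshot wrapper simply delegates to the underlying HTTP downloader built from the snapshot config.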
+func (e *Downloader) Download(ctx context.Context, spec program.Spec, version string) (string, error) { + return e.downloader.Download(ctx, spec, version) } func snapshotConfig(config *artifact.Config, versionOverride string) (*artifact.Config, error) { diff --git a/internal/pkg/artifact/download/snapshot/verifier.go b/internal/pkg/artifact/download/snapshot/verifier.go index e4e4e667be7..2e844635234 100644 --- a/internal/pkg/artifact/download/snapshot/verifier.go +++ b/internal/pkg/artifact/download/snapshot/verifier.go @@ -5,11 +5,18 @@ package snapshot import ( + "github.com/elastic/elastic-agent/internal/pkg/agent/errors" + "github.com/elastic/elastic-agent/internal/pkg/agent/program" "github.com/elastic/elastic-agent/internal/pkg/artifact" "github.com/elastic/elastic-agent/internal/pkg/artifact/download" "github.com/elastic/elastic-agent/internal/pkg/artifact/download/http" ) +type Verifier struct { + verifier download.Verifier + versionOverride string +} + // NewVerifier creates a verifier which first checks the local directory // and then falls back to remote if configured. func NewVerifier(config *artifact.Config, allowEmptyPgp bool, pgp []byte, versionOverride string) (download.Verifier, error) { @@ -17,5 +24,33 @@ func NewVerifier(config *artifact.Config, allowEmptyPgp bool, pgp []byte, versio if err != nil { return nil, err } - return http.NewVerifier(cfg, allowEmptyPgp, pgp) + v, err := http.NewVerifier(cfg, allowEmptyPgp, pgp) + if err != nil { + return nil, errors.New(err, "failed to create snapshot verifier") + } + + return &Verifier{ + verifier: v, + versionOverride: versionOverride, + }, nil +} + +// Verify checks the package from configured source. +func (e *Verifier) Verify(spec program.Spec, version string) error { + return e.verifier.Verify(spec, version) +} + +func (e *Verifier) Reload(c *artifact.Config) error { + reloader, ok := e.verifier.(artifact.ConfigReloader) + if !ok { + return nil + } + + cfg, err := snapshotConfig(c, e.versionOverride) + if err != nil { + return errors.New(err, "snapshot.verifier: failed to generate snapshot config") + } + + return reloader.Reload(cfg) + } From 64cb5a05121904c2b1f1a7b7af5227c8f56ced5a Mon Sep 17 00:00:00 2001 From: Aleksandr Maus Date: Mon, 8 Aug 2022 18:48:57 -0400 Subject: [PATCH 076/180] =?UTF-8?q?Bundle=20elastic-agent.app=20for=20MacO?= =?UTF-8?q?S,=20needed=20to=20be=20able=20to=20enable=20the=20=20=E2=80=A6?= =?UTF-8?q?=20(#714)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Bundle elastic-agent.app for MacOS, needed to be able to enable the Full Disk Access * Calm down the linter * Fix pathing for windows unit test --- dev-tools/packaging/files/darwin/PkgInfo | 1 + dev-tools/packaging/packages.yml | 230 ++++++++++-------- .../templates/darwin/Info.plist.tmpl | 20 ++ internal/pkg/agent/application/info/state.go | 24 +- .../pkg/agent/application/info/state_test.go | 53 ++++ .../pkg/agent/application/paths/common.go | 19 +- internal/pkg/agent/install/install.go | 47 +++- 7 files changed, 280 insertions(+), 114 deletions(-) create mode 100644 dev-tools/packaging/files/darwin/PkgInfo create mode 100644 dev-tools/packaging/templates/darwin/Info.plist.tmpl create mode 100644 internal/pkg/agent/application/info/state_test.go diff --git a/dev-tools/packaging/files/darwin/PkgInfo b/dev-tools/packaging/files/darwin/PkgInfo new file mode 100644 index 00000000000..bd04210fb49 --- /dev/null +++ b/dev-tools/packaging/files/darwin/PkgInfo @@ -0,0
+1 @@ +APPL???? \ No newline at end of file diff --git a/dev-tools/packaging/packages.yml b/dev-tools/packaging/packages.yml index bd5e9d1722c..860e86e97a7 100644 --- a/dev-tools/packaging/packages.yml +++ b/dev-tools/packaging/packages.yml @@ -280,13 +280,7 @@ shared: mode: 0644 skip_on_missing: true - - &agent_binary_files - '{{.BeatName}}{{.BinaryExt}}': - source: build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} - mode: 0755 - 'data/{{.BeatName}}-{{ commit_short }}/{{.BeatName}}{{.BinaryExt}}': - source: build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} - mode: 0755 + - &agent_binary_common_files LICENSE.txt: source: '{{ repo.RootDir }}/LICENSE.txt' mode: 0644 @@ -312,103 +306,139 @@ shared: {{ commit }} mode: 0644 - # Binary package spec (tar.gz for linux/darwin) for community beats. + - &agent_binary_files + '{{.BeatName}}{{.BinaryExt}}': + source: build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} + mode: 0755 + 'data/{{.BeatName}}-{{ commit_short }}/{{.BeatName}}{{.BinaryExt}}': + source: build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} + mode: 0755 + <<: *agent_binary_common_files + + - &agent_darwin_app_bundle_files + 'data/{{.BeatName}}-{{ commit_short }}/elastic-agent.app/Contents/Info.plist': + template: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/darwin/Info.plist.tmpl' + mode: 0644 + 'data/{{.BeatName}}-{{ commit_short }}/elastic-agent.app/Contents/PkgInfo': + template: '{{ elastic_beats_dir }}/dev-tools/packaging/files/darwin/PkgInfo' + mode: 0644 + + - &agent_darwin_binary_files + '{{.BeatName}}{{.BinaryExt}}': + source: build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} + mode: 0755 + 'data/{{.BeatName}}-{{ commit_short }}/elastic-agent.app/Contents/MacOS/{{.BeatName}}{{.BinaryExt}}': + source: build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} + mode: 0755 + <<: *agent_darwin_app_bundle_files + <<: *agent_binary_common_files + + - &beats_targz_binary_files + 'data/{{.BeatName}}-{{ commit_short }}/downloads/filebeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz': + source: '{{.AgentDropPath}}/filebeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz' + mode: 0644 + 'data/{{.BeatName}}-{{ commit_short }}/downloads/filebeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.sha512': + source: '{{.AgentDropPath}}/filebeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.sha512' + mode: 0644 + 'data/{{.BeatName}}-{{ commit_short }}/downloads/filebeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.asc': + source: '{{.AgentDropPath}}/filebeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.asc' + mode: 0644 + skip_on_missing: true + 'data/{{.BeatName}}-{{ commit_short }}/downloads/heartbeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz': + source: '{{.AgentDropPath}}/heartbeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz' + mode: 0644 + 'data/{{.BeatName}}-{{ commit_short }}/downloads/heartbeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.sha512': + source: '{{.AgentDropPath}}/heartbeat-{{ beat_version }}{{if 
.Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.sha512' + mode: 0644 + 'data/{{.BeatName}}-{{ commit_short }}/downloads/heartbeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.asc': + source: '{{.AgentDropPath}}/heartbeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.asc' + mode: 0644 + skip_on_missing: true + 'data/{{.BeatName}}-{{ commit_short }}/downloads/metricbeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz': + source: '{{.AgentDropPath}}/metricbeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz' + mode: 0644 + 'data/{{.BeatName}}-{{ commit_short }}/downloads/metricbeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.sha512': + source: '{{.AgentDropPath}}/metricbeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.sha512' + mode: 0644 + 'data/{{.BeatName}}-{{ commit_short }}/downloads/metricbeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.asc': + source: '{{.AgentDropPath}}/metricbeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.asc' + mode: 0644 + skip_on_missing: true + 'data/{{.BeatName}}-{{ commit_short }}/downloads/osquerybeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz': + source: '{{.AgentDropPath}}/osquerybeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz' + mode: 0644 + skip_on_missing: true + 'data/{{.BeatName}}-{{ commit_short }}/downloads/osquerybeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.sha512': + source: '{{.AgentDropPath}}/osquerybeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.sha512' + mode: 0644 + skip_on_missing: true + 'data/{{.BeatName}}-{{ commit_short }}/downloads/osquerybeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.asc': + source: '{{.AgentDropPath}}/osquerybeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.asc' + mode: 0644 + skip_on_missing: true + 'data/{{.BeatName}}-{{ commit_short }}/downloads/endpoint-security-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz': + source: '{{.AgentDropPath}}/endpoint-security-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz' + mode: 0644 + skip_on_missing: true + 'data/{{.BeatName}}-{{ commit_short }}/downloads/endpoint-security-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.sha512': + source: '{{.AgentDropPath}}/endpoint-security-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.sha512' + mode: 0644 + skip_on_missing: true + 'data/{{.BeatName}}-{{ commit_short }}/downloads/endpoint-security-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.asc': + source: '{{.AgentDropPath}}/endpoint-security-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.asc' + mode: 0644 + skip_on_missing: true + 'data/{{.BeatName}}-{{ commit_short }}/downloads/apm-server-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz': + source: '{{.AgentDropPath}}/apm-server-{{ beat_version }}{{if 
.Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz' + mode: 0644 + skip_on_missing: true + 'data/{{.BeatName}}-{{ commit_short }}/downloads/apm-server-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.sha512': + source: '{{.AgentDropPath}}/apm-server-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.sha512' + mode: 0644 + skip_on_missing: true + 'data/{{.BeatName}}-{{ commit_short }}/downloads/apm-server-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.asc': + source: '{{.AgentDropPath}}/apm-server-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.asc' + mode: 0644 + skip_on_missing: true + 'data/{{.BeatName}}-{{ commit_short }}/downloads/fleet-server-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz': + source: '{{.AgentDropPath}}/fleet-server-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz' + mode: 0644 + skip_on_missing: true + 'data/{{.BeatName}}-{{ commit_short }}/downloads/fleet-server-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.sha512': + source: '{{.AgentDropPath}}/fleet-server-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.sha512' + mode: 0644 + skip_on_missing: true + 'data/{{.BeatName}}-{{ commit_short }}/downloads/fleet-server-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.asc': + source: '{{.AgentDropPath}}/fleet-server-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.asc' + mode: 0644 + skip_on_missing: true + 'data/{{.BeatName}}-{{ commit_short }}/downloads/cloudbeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz': + source: '{{.AgentDropPath}}/cloudbeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz' + mode: 0644 + skip_on_missing: true + 'data/{{.BeatName}}-{{ commit_short }}/downloads/cloudbeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.sha512': + source: '{{.AgentDropPath}}/cloudbeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.sha512' + mode: 0644 + skip_on_missing: true + 'data/{{.BeatName}}-{{ commit_short }}/downloads/cloudbeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.asc': + source: '{{.AgentDropPath}}/cloudbeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.asc' + mode: 0644 + skip_on_missing: true + + # Binary package spec (tar.gz for linux) for community beats. 
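+ # Both this linux spec and the darwin spec below pull in the shared beats download list via the *beats_targz_binary_files anchor.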
- &agent_binary_spec <<: *common files: <<: *agent_binary_files - 'data/{{.BeatName}}-{{ commit_short }}/downloads/filebeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz': - source: '{{.AgentDropPath}}/filebeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz' - mode: 0644 - 'data/{{.BeatName}}-{{ commit_short }}/downloads/filebeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.sha512': - source: '{{.AgentDropPath}}/filebeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.sha512' - mode: 0644 - 'data/{{.BeatName}}-{{ commit_short }}/downloads/filebeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.asc': - source: '{{.AgentDropPath}}/filebeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.asc' - mode: 0644 - skip_on_missing: true - 'data/{{.BeatName}}-{{ commit_short }}/downloads/heartbeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz': - source: '{{.AgentDropPath}}/heartbeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz' - mode: 0644 - 'data/{{.BeatName}}-{{ commit_short }}/downloads/heartbeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.sha512': - source: '{{.AgentDropPath}}/heartbeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.sha512' - mode: 0644 - 'data/{{.BeatName}}-{{ commit_short }}/downloads/heartbeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.asc': - source: '{{.AgentDropPath}}/heartbeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.asc' - mode: 0644 - skip_on_missing: true - 'data/{{.BeatName}}-{{ commit_short }}/downloads/metricbeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz': - source: '{{.AgentDropPath}}/metricbeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz' - mode: 0644 - 'data/{{.BeatName}}-{{ commit_short }}/downloads/metricbeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.sha512': - source: '{{.AgentDropPath}}/metricbeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.sha512' - mode: 0644 - 'data/{{.BeatName}}-{{ commit_short }}/downloads/metricbeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.asc': - source: '{{.AgentDropPath}}/metricbeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.asc' - mode: 0644 - skip_on_missing: true - 'data/{{.BeatName}}-{{ commit_short }}/downloads/osquerybeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz': - source: '{{.AgentDropPath}}/osquerybeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz' - mode: 0644 - skip_on_missing: true - 'data/{{.BeatName}}-{{ commit_short }}/downloads/osquerybeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.sha512': - source: '{{.AgentDropPath}}/osquerybeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.sha512' - mode: 0644 - skip_on_missing: true - 'data/{{.BeatName}}-{{ commit_short }}/downloads/osquerybeat-{{ beat_version 
}}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.asc': - source: '{{.AgentDropPath}}/osquerybeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.asc' - mode: 0644 - skip_on_missing: true - 'data/{{.BeatName}}-{{ commit_short }}/downloads/endpoint-security-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz': - source: '{{.AgentDropPath}}/endpoint-security-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz' - mode: 0644 - skip_on_missing: true - 'data/{{.BeatName}}-{{ commit_short }}/downloads/endpoint-security-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.sha512': - source: '{{.AgentDropPath}}/endpoint-security-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.sha512' - mode: 0644 - skip_on_missing: true - 'data/{{.BeatName}}-{{ commit_short }}/downloads/endpoint-security-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.asc': - source: '{{.AgentDropPath}}/endpoint-security-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.asc' - mode: 0644 - skip_on_missing: true - 'data/{{.BeatName}}-{{ commit_short }}/downloads/apm-server-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz': - source: '{{.AgentDropPath}}/apm-server-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz' - mode: 0644 - skip_on_missing: true - 'data/{{.BeatName}}-{{ commit_short }}/downloads/apm-server-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.sha512': - source: '{{.AgentDropPath}}/apm-server-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.sha512' - mode: 0644 - skip_on_missing: true - 'data/{{.BeatName}}-{{ commit_short }}/downloads/apm-server-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.asc': - source: '{{.AgentDropPath}}/apm-server-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.asc' - mode: 0644 - skip_on_missing: true - 'data/{{.BeatName}}-{{ commit_short }}/downloads/fleet-server-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz': - source: '{{.AgentDropPath}}/fleet-server-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz' - mode: 0644 - skip_on_missing: true - 'data/{{.BeatName}}-{{ commit_short }}/downloads/fleet-server-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.sha512': - source: '{{.AgentDropPath}}/fleet-server-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.sha512' - mode: 0644 - skip_on_missing: true - 'data/{{.BeatName}}-{{ commit_short }}/downloads/fleet-server-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.asc': - source: '{{.AgentDropPath}}/fleet-server-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.asc' - mode: 0644 - skip_on_missing: true - 'data/{{.BeatName}}-{{ commit_short }}/downloads/cloudbeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz': - source: '{{.AgentDropPath}}/cloudbeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz' - mode: 0644 - skip_on_missing: true - 
'data/{{.BeatName}}-{{ commit_short }}/downloads/cloudbeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.sha512': - source: '{{.AgentDropPath}}/cloudbeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.sha512' - mode: 0644 - skip_on_missing: true - 'data/{{.BeatName}}-{{ commit_short }}/downloads/cloudbeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.asc': - source: '{{.AgentDropPath}}/cloudbeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.asc' - mode: 0644 - skip_on_missing: true + <<: *beats_targz_binary_files + - &agent_darwin_binary_spec + <<: *common + files: + <<: *agent_darwin_binary_files + <<: *beats_targz_binary_files + # Binary package spec (zip for windows) for community beats. - &agent_windows_binary_spec <<: *common @@ -1056,11 +1086,11 @@ specs: - os: darwin types: [tgz] spec: - <<: *agent_binary_spec + <<: *agent_darwin_binary_spec <<: *elastic_license_for_binaries files: '{{.BeatName}}{{.BinaryExt}}': - source: data/{{.BeatName}}-{{ commit_short }}/{{.BeatName}}{{.BinaryExt}} + source: data/{{.BeatName}}-{{ commit_short }}/elastic-agent.app/Contents/MacOS/{{.BeatName}}{{.BinaryExt}} symlink: true mode: 0755 diff --git a/dev-tools/packaging/templates/darwin/Info.plist.tmpl b/dev-tools/packaging/templates/darwin/Info.plist.tmpl new file mode 100644 index 00000000000..b98202219b5 --- /dev/null +++ b/dev-tools/packaging/templates/darwin/Info.plist.tmpl @@ -0,0 +1,20 @@ +<?xml version="1.0" encoding="UTF-8"?> +<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd"> +<plist version="1.0"> +<dict> + <key>CFBundleExecutable</key> + <string>elastic-agent</string> + <key>CFBundleIdentifier</key> + <string>co.elastic.elastic-agent</string> + <key>CFBundleInfoDictionaryVersion</key> + <string>6.0</string> + <key>CFBundleName</key> + <string>elastic-agent</string> + <key>CFBundlePackageType</key> + <string>APPL</string> + <key>CFBundleShortVersionString</key> + <string>{{ beat_version }}</string> + <key>CFBundleVersion</key> + <string>{{ beat_version }}</string> +</dict> +</plist> diff --git a/internal/pkg/agent/application/info/state.go b/internal/pkg/agent/application/info/state.go index 1a6602f51f8..e00948fab58 100644 --- a/internal/pkg/agent/application/info/state.go +++ b/internal/pkg/agent/application/info/state.go @@ -8,18 +8,27 @@ import ( "fmt" "os" "path/filepath" + "runtime" "strings" "github.com/elastic/elastic-agent/internal/pkg/agent/application/paths" "github.com/elastic/elastic-agent/internal/pkg/release" ) +const ( + darwin = "darwin" +) + // RunningInstalled returns true when the executing Agent is the installed Agent. // // This verifies the running executable path based on hard-coded paths // for each platform type.
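// On darwin the installed Agent is also reachable through the paths.ShellWrapperPath symlink, so that path is accepted as well.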
func RunningInstalled() bool { - expected := filepath.Join(paths.InstallPath, paths.BinaryName) + expectedPaths := []string{filepath.Join(paths.InstallPath, paths.BinaryName)} + if runtime.GOOS == darwin { + // For the symlink on darwin the execPath is /usr/local/bin/elastic-agent + expectedPaths = append(expectedPaths, paths.ShellWrapperPath) + } execPath, _ := os.Executable() execPath, _ = filepath.Abs(execPath) execName := filepath.Base(execPath) @@ -28,13 +37,24 @@ func RunningInstalled() bool { // executable path is being reported as being down inside of data path // move up two directories to perform the comparison execDir = filepath.Dir(filepath.Dir(execDir)) + if runtime.GOOS == darwin { + execDir = filepath.Dir(filepath.Dir(filepath.Dir(execDir))) + } execPath = filepath.Join(execDir, execName) } - return paths.ArePathsEqual(expected, execPath) + for _, expected := range expectedPaths { + if paths.ArePathsEqual(expected, execPath) { + return true + } + } + return false } // IsInsideData returns true when the exePath is inside of the current Agent's data path. func IsInsideData(exePath string) bool { expectedPath := filepath.Join("data", fmt.Sprintf("elastic-agent-%s", release.ShortCommit())) + if runtime.GOOS == darwin { + expectedPath = filepath.Join(expectedPath, "elastic-agent.app", "Contents", "MacOS") + } return strings.HasSuffix(exePath, expectedPath) } diff --git a/internal/pkg/agent/application/info/state_test.go b/internal/pkg/agent/application/info/state_test.go new file mode 100644 index 00000000000..39f5b7e9738 --- /dev/null +++ b/internal/pkg/agent/application/info/state_test.go @@ -0,0 +1,53 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package info + +import ( + "fmt" + "path/filepath" + "runtime" + "testing" + + "github.com/elastic/elastic-agent/internal/pkg/release" + "github.com/google/go-cmp/cmp" +) + +func TestIsInsideData(t *testing.T) { + + validExePath := filepath.Join("data", fmt.Sprintf("elastic-agent-%s", release.ShortCommit())) + + if runtime.GOOS == darwin { + validExePath = filepath.Join(validExePath, "elastic-agent.app", "Contents", "MacOS") + } + + tests := []struct { + name string + exePath string + res bool + }{ + { + name: "empty", + }, + { + name: "invalid", + exePath: "data/elastic-agent", + }, + { + name: "valid", + exePath: validExePath, + res: true, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + res := IsInsideData(tc.exePath) + diff := cmp.Diff(tc.res, res) + if diff != "" { + t.Error(diff) + } + }) + } +} diff --git a/internal/pkg/agent/application/paths/common.go b/internal/pkg/agent/application/paths/common.go index 7511a791146..3bebe122154 100644 --- a/internal/pkg/agent/application/paths/common.go +++ b/internal/pkg/agent/application/paths/common.go @@ -9,6 +9,7 @@ import ( "fmt" "os" "path/filepath" + "runtime" "strings" "sync" @@ -21,6 +22,8 @@ const ( // AgentLockFileName is the name of the overall Elastic Agent file lock. AgentLockFileName = "agent.lock" tempSubdir = "tmp" + + darwin = "darwin" ) var ( @@ -68,8 +71,10 @@ func TempDir() string { tmpDir := filepath.Join(Data(), tempSubdir) tmpCreator.Do(func() { - // create tempdir as it probably don't exists - os.MkdirAll(tmpDir, 0750) + // Create tempdir as it probably doesn't exist.
+ // The error was not checked here before and the linter is not happy about it. + // Changing this now would lead to a wider change scope than intended at the moment, so just making the linter happy for now. + _ = os.MkdirAll(tmpDir, 0750) }) return tmpDir } @@ -172,10 +177,15 @@ func SetInstall(path string) { // initialTop returns the initial top-level path for the binary // // When nested in top-level/data/elastic-agent-${hash}/ the result is top-level/. +// The agent executable for MacOS is wrapped in the bundle, so the path to the binary is +// top-level/data/elastic-agent-${hash}/elastic-agent.app/Contents/MacOS func initialTop() string { exePath := retrieveExecutablePath() if insideData(exePath) { - return filepath.Dir(filepath.Dir(exePath)) + exePath = filepath.Dir(filepath.Dir(exePath)) + if runtime.GOOS == darwin { + exePath = filepath.Dir(filepath.Dir(filepath.Dir(exePath))) + } } return exePath } @@ -196,5 +206,8 @@ func retrieveExecutablePath() string { // insideData returns true when the exePath is inside of the current Agent's data path. func insideData(exePath string) bool { expectedPath := filepath.Join("data", fmt.Sprintf("elastic-agent-%s", release.ShortCommit())) + if runtime.GOOS == darwin { + expectedPath = filepath.Join(expectedPath, "elastic-agent.app", "Contents", "MacOS") + } return strings.HasSuffix(exePath, expectedPath) } diff --git a/internal/pkg/agent/install/install.go b/internal/pkg/agent/install/install.go index 58f3fa73312..a5b02eb015b 100644 --- a/internal/pkg/agent/install/install.go +++ b/internal/pkg/agent/install/install.go @@ -9,6 +9,7 @@ import ( "io/ioutil" "os" "path/filepath" + "runtime" "github.com/otiai10/copy" @@ -17,6 +18,10 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/agent/errors" ) +const ( + darwin = "darwin" +) + // Install installs Elastic Agent persistently on the system including creating and starting its service.
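// On darwin a symlink to the bundled binary is created instead of the shell wrapper script (see the darwin branch below).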
func Install(cfgFile string) error { dir, err := findDirectory() @@ -53,15 +58,36 @@ func Install(cfgFile string) error { // place shell wrapper, if present on platform if paths.ShellWrapperPath != "" { - err = os.MkdirAll(filepath.Dir(paths.ShellWrapperPath), 0755) - if err == nil { - err = ioutil.WriteFile(paths.ShellWrapperPath, []byte(paths.ShellWrapper), 0755) - } - if err != nil { - return errors.New( - err, - fmt.Sprintf("failed to write shell wrapper (%s)", paths.ShellWrapperPath), - errors.M("destination", paths.ShellWrapperPath)) + // Install symlink for darwin instead + if runtime.GOOS == darwin { + // Check if previous shell wrapper or symlink exists and remove it so it can be overwritten + if _, err := os.Lstat(paths.ShellWrapperPath); err == nil { + if err := os.Remove(paths.ShellWrapperPath); err != nil { + return errors.New( + err, + fmt.Sprintf("failed to remove (%s)", paths.ShellWrapperPath), + errors.M("destination", paths.ShellWrapperPath)) + } + } + err = os.Symlink("/Library/Elastic/Agent/elastic-agent", paths.ShellWrapperPath) + if err != nil { + return errors.New( + err, + fmt.Sprintf("failed to create elastic-agent symlink (%s)", paths.ShellWrapperPath), + errors.M("destination", paths.ShellWrapperPath)) + } + } else { + err = os.MkdirAll(filepath.Dir(paths.ShellWrapperPath), 0755) + if err == nil { + //nolint: gosec // this is intended to be an executable shell script, not changing the permissions for the linter + err = ioutil.WriteFile(paths.ShellWrapperPath, []byte(paths.ShellWrapper), 0755) + } + if err != nil { + return errors.New( + err, + fmt.Sprintf("failed to write shell wrapper (%s)", paths.ShellWrapperPath), + errors.M("destination", paths.ShellWrapperPath)) + } } } @@ -151,6 +177,9 @@ func findDirectory() (string, error) { // executable path is being reported as being down inside of data path // move up two directories to perform the copy sourceDir = filepath.Dir(filepath.Dir(sourceDir)) + if runtime.GOOS == darwin { + sourceDir = filepath.Dir(filepath.Dir(filepath.Dir(sourceDir))) + } } err = verifyDirectory(sourceDir) if err != nil { From 940a56ce215175054db4397ec6738a1ab1dd08bd Mon Sep 17 00:00:00 2001 From: Florian Lehner Date: Tue, 9 Aug 2022 17:00:24 +0200 Subject: [PATCH 077/180] crossbuild: add fix to set ulimit for debian images (#856) Signed-off-by: Florian Lehner --- dev-tools/mage/crossbuild.go | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/dev-tools/mage/crossbuild.go b/dev-tools/mage/crossbuild.go index 56a6860393b..8a193003ab7 100644 --- a/dev-tools/mage/crossbuild.go +++ b/dev-tools/mage/crossbuild.go @@ -281,6 +281,15 @@ func (b GolangCrossBuilder) Build() error { verbose = "true" } var args []string + // There's a bug on certain debian versions: + // https://discuss.linuxcontainers.org/t/debian-jessie-containers-have-extremely-low-performance/1272 + // basically, apt-get has a bug where it will try to iterate through every possible FD as set by the NOFILE ulimit. + // On certain docker installs, docker will set the ulimit to a value > 10^9, which means apt-get will take >1 hour. + // This runs across all possible debian platforms, since there's no real harm in it.
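+ // The 262144 cap used below is low enough that apt-get's descriptor sweep stays fast, while remaining far larger than anything a build needs.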
+ if strings.Contains(image, "debian") { + args = append(args, "--ulimit", "nofile=262144:262144") + } + if runtime.GOOS != "windows" { args = append(args, "--env", "EXEC_UID="+strconv.Itoa(os.Getuid()), From dc5b1a2ec19c04e54f663be166e54f892d3c3385 Mon Sep 17 00:00:00 2001 From: Andrew Cholakian Date: Tue, 9 Aug 2022 16:48:33 -0500 Subject: [PATCH 078/180] [Heartbeat] Cleanup docker install / always add playwright deps (#764) This is the agent counterpart to elastic/beats#32122 Refactors Dockerfile handling of synthetics deps to rely on playwright install-deps rather than us manually keeping up to date with those. This should fix issues with newer playwrights needing additional deps. This also cleans up the Dockerfile a good amount, and fixes indentation. Finally, this removes the unused Dockerfile.elastic-agent.tmpl file since agent is now its own repo. It also cleans up some other metadata that no longer does anything. No changelog is specified because no user facing changes are present. --- .../docker/Dockerfile.elastic-agent.tmpl | 88 ++++++++----------- 1 file changed, 38 insertions(+), 50 deletions(-) diff --git a/dev-tools/packaging/templates/docker/Dockerfile.elastic-agent.tmpl b/dev-tools/packaging/templates/docker/Dockerfile.elastic-agent.tmpl index b78fcfdb196..02358d16d57 100644 --- a/dev-tools/packaging/templates/docker/Dockerfile.elastic-agent.tmpl +++ b/dev-tools/packaging/templates/docker/Dockerfile.elastic-agent.tmpl @@ -44,48 +44,6 @@ RUN for iter in {1..10}; do \ (exit $exit_code) {{- end }} -{{- if (and (contains .image_name "-complete") (not (contains .from "ubi-minimal"))) }} -RUN apt-get update -y && \ - for iter in {1..10}; do \ - DEBIAN_FRONTEND=noninteractive apt-get install --no-install-recommends --yes \ - libglib2.0-0\ - libnss3\ - libnspr4\ - libatk1.0-0\ - libatk-bridge2.0-0\ - libcups2\ - libdrm2\ - libdbus-1-3\ - libxcb1\ - libxkbcommon0\ - libx11-6\ - libxcomposite1\ - libxdamage1\ - libxext6\ - libxfixes3\ - libxrandr2\ - libgbm1\ - libpango-1.0-0\ - libcairo2\ - libasound2\ - libatspi2.0-0\ - libxshmfence1 \ - fonts-noto-core\ - fonts-noto-cjk &&\ - apt-get clean all && \ - exit_code=0 && break || exit_code=$? 
&& echo "apt-get error: retry $iter in 10s" && sleep 10; \ - done; \ - (exit $exit_code) -ENV NODE_PATH={{ $beatHome }}/.node -RUN echo \ - $NODE_PATH \ - {{ $beatHome }}/.config \ - {{ $beatHome }}/.synthetics \ - {{ $beatHome }}/.npm \ - {{ $beatHome }}/.cache \ - | xargs -IDIR sh -c 'mkdir -p DIR && chmod 0770 DIR' -{{- end }} - LABEL \ org.label-schema.build-date="{{ date }}" \ org.label-schema.schema-version="1.0" \ @@ -172,9 +130,7 @@ RUN mkdir /app {{- else }} RUN groupadd --gid 1000 {{ .BeatName }} RUN useradd -M --uid 1000 --gid 1000 --groups 0 --home {{ $beatHome }} {{ .user }} -{{- if (and (contains .image_name "-complete") (not (contains .from "ubi-minimal"))) }} -RUN chown {{ .user }} $NODE_PATH -{{- end }} + {{- if contains .image_name "-cloud" }} # Generate folder for a stub command that will be overwritten at runtime RUN mkdir /app @@ -193,12 +149,19 @@ RUN mkdir -p {{ $beatHome }}/data/{{.BeatName}}-{{ commit_short }}/{{ .beats_ins # heartbeat requires cap_net_raw,cap_setuid to run ICMP checks and change npm user setcap cap_net_raw,cap_setuid+p {{ $beatHome }}/data/{{.BeatName}}-{{ commit_short }}/{{ .beats_install_path }}/heartbeat-*/heartbeat -USER {{ .user }} - {{- if (and (contains .image_name "-complete") (not (contains .from "ubi-minimal"))) }} +USER root +ENV NODE_PATH={{ $beatHome }}/.node +RUN echo \ + $NODE_PATH \ + {{ $beatHome }}/.config \ + {{ $beatHome }}/.synthetics \ + {{ $beatHome }}/.npm \ + {{ $beatHome }}/.cache \ + | xargs -IDIR sh -c 'mkdir -p DIR && chmod 0770 DIR' + # Setup synthetics env vars ENV ELASTIC_SYNTHETICS_CAPABLE=true -ENV SUITES_DIR={{ $beatHome }}/suites ENV NODE_VERSION=16.15.0 ENV PATH="$NODE_PATH/node/bin:$PATH" # Install the latest version of @elastic/synthetics forcefully ignoring the previously @@ -207,6 +170,9 @@ ENV PATH="$NODE_PATH/node/bin:$PATH" RUN cd {{$beatHome}}/.node \ && NODE_DOWNLOAD_URL="" \ && case "$(arch)" in \ + arm64) \ + NODE_DOWNLOAD_URL=https://nodejs.org/dist/v${NODE_VERSION}/node-v${NODE_VERSION}-linux-x64.tar.xz \ + ;; \ x86_64) \ NODE_DOWNLOAD_URL=https://nodejs.org/dist/v${NODE_VERSION}/node-v${NODE_VERSION}-linux-x64.tar.xz \ ;; \ @@ -219,9 +185,31 @@ RUN cd {{$beatHome}}/.node \ esac \ && mkdir -p node \ && curl ${NODE_DOWNLOAD_URL} | tar -xJ --strip 1 -C node \ - && chmod ug+rwX -R $NODE_PATH \ - && npm i -g -f @elastic/synthetics && chmod ug+rwX -R $NODE_PATH + && chmod ug+rwX -R $NODE_PATH + +# Install synthetics as a regular user, installing npm deps as root odesn't work +RUN chown -R {{ .user }} $NODE_PATH +USER {{ .user }} +# If this fails dump the NPM logs +RUN npm i -g --loglevel verbose -f @elastic/synthetics || sh -c 'tail -n +1 /root/.npm/_logs/* && exit 1' +RUN chmod ug+rwX -R $NODE_PATH +USER root + +# Install the deps as needed by the exact version of playwright elastic synthetics uses +# We don't use npx playwright install-deps because that could pull a newer version +# Install additional fonts as well +RUN for iter in {1..10}; do \ + apt-get update -y && \ + $NODE_PATH/node/lib/node_modules/@elastic/synthetics/node_modules/.bin/playwright install-deps chromium && \ + DEBIAN_FRONTEND=noninteractive apt-get install --no-install-recommends --yes \ + fonts-noto \ + fonts-noto-cjk && \ + exit_code=0 && break || exit_code=$? 
&& echo "apt-get error: retry $iter in 10s" && sleep 10; \ + done; \ + (exit $exit_code) + {{- end }} +USER {{ .user }} {{- range $i, $port := .ExposePorts }} From 448b218a0f1f3e9877f5b876cf09b94c6227e72b Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Wed, 10 Aug 2022 01:36:28 -0400 Subject: [PATCH 079/180] [Automation] Update elastic stack version to 8.5.0-41aadc32 for testing (#889) Co-authored-by: apmmachine --- testing/environments/snapshot.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/testing/environments/snapshot.yml b/testing/environments/snapshot.yml index 8d629d8e77b..46eb3c31168 100644 --- a/testing/environments/snapshot.yml +++ b/testing/environments/snapshot.yml @@ -3,7 +3,7 @@ version: '2.3' services: elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:8.5.0-7dbc10f8-SNAPSHOT + image: docker.elastic.co/elasticsearch/elasticsearch:8.5.0-41aadc32-SNAPSHOT # When extend is used it merges healthcheck.tests, see: # https://github.com/docker/compose/issues/8962 # healthcheck: @@ -42,7 +42,7 @@ services: - ./docker/logstash/pki:/etc/pki:ro kibana: - image: docker.elastic.co/kibana/kibana:8.5.0-7dbc10f8-SNAPSHOT + image: docker.elastic.co/kibana/kibana:8.5.0-41aadc32-SNAPSHOT environment: - "ELASTICSEARCH_USERNAME=kibana_system_user" - "ELASTICSEARCH_PASSWORD=testing" From 56787a61c0b7faed1114160e01dfaa812cd1a95d Mon Sep 17 00:00:00 2001 From: Pier-Hugues Pellerin Date: Wed, 10 Aug 2022 09:49:45 -0400 Subject: [PATCH 080/180] Fix/panic with composable renderer (#823) * Fix a panic with wg passed to the composable object In the code to retrieve the variables from the configuration files we need to pass a execution callback, this callback will be called in a goroutine. This callback can be executed multiple time until the composable renderer is stopped. There were a problem in the code that made the callback called multiple time and it made the waitgroup internal counter to do to a negative values. This commit change the behavior, it start the composable renderer give it a callback when the callback receives the variables it will stop the composable's Run method using the context. This ensure that the callback will be called a single time and that the variables are correctly retrieved. Fixes: #806 --- CHANGELOG.next.asciidoc | 1 + internal/pkg/agent/install/uninstall.go | 27 ++++++++++++++++++++----- 2 files changed, 23 insertions(+), 5 deletions(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 2361baf73f5..9f35f0dffcd 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -112,6 +112,7 @@ - Allow the / char in variable names in eql and transpiler. {issue}715[715] {pull}718[718] - Fix data duplication for standalone agent on Kubernetes using the default manifest {issue-beats}31512[31512] {pull}742[742] - Agent updates will clean up unneeded artifacts. {issue}693[693] {issue}694[694] {pull}752[752] +- Fix a panic caused by a race condition when installing the Elastic Agent. 
{issue}806[806] ==== New features diff --git a/internal/pkg/agent/install/uninstall.go b/internal/pkg/agent/install/uninstall.go index 87ff47ae169..598ddaeea8c 100644 --- a/internal/pkg/agent/install/uninstall.go +++ b/internal/pkg/agent/install/uninstall.go @@ -13,6 +13,7 @@ import ( "runtime" "strings" "sync" + "time" "github.com/kardianos/service" @@ -233,11 +234,20 @@ func applyDynamics(ctx context.Context, log *logger.Logger, cfg *config.Config) inputs, ok := transpiler.Lookup(ast, "inputs") if ok { varsArray := make([]*transpiler.Vars, 0) - var wg sync.WaitGroup - wg.Add(1) + + // Give some time for the providers to replace the variables + const timeout = 15 * time.Second + var doOnce sync.Once + ctx, cancel := context.WithTimeout(ctx, timeout) + defer cancel() + + // The composable system runs continuously; we are only interested in the first run of the + // renderer to collect the variables, so we stop the execution after it. varsCallback := func(vv []*transpiler.Vars) { - varsArray = vv - wg.Done() + doOnce.Do(func() { + varsArray = vv + cancel() + }) } ctrl, err := composable.New(log, cfg) @@ -245,7 +255,14 @@ func applyDynamics(ctx context.Context, log *logger.Logger, cfg *config.Config) return nil, err } _ = ctrl.Run(ctx, varsCallback) - wg.Wait() + + // Wait for the first callback to retrieve the variables from the providers. + <-ctx.Done() + + // Bail out if callback was not executed in time. + if errors.Is(ctx.Err(), context.DeadlineExceeded) { + return nil, errors.New("failed to get transpiler vars", err) + } renderedInputs, err := transpiler.RenderInputs(inputs, varsArray) if err != nil { From 4e08c7a6306a73369502dda12b43c671facfa6b5 Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Wed, 10 Aug 2022 15:05:19 -0400 Subject: [PATCH 081/180] [Automation] Update go release version to 1.18.5 (#832) Co-authored-by: apmmachine --- .go-version | 2 +- Dockerfile | 2 +- version/docs/version.asciidoc | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.go-version b/.go-version index 4512502b629..8e8b0a9335a 100644 --- a/.go-version +++ b/.go-version @@ -1 +1 @@ -1.17.12 +1.18.5 diff --git a/Dockerfile b/Dockerfile index 6125555c8c0..a4f9b4d338f 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -ARG GO_VERSION=1.17.12 +ARG GO_VERSION=1.18.5 FROM circleci/golang:${GO_VERSION} diff --git a/version/docs/version.asciidoc b/version/docs/version.asciidoc index e33eb13d229..4a1ae4fd6ea 100644 --- a/version/docs/version.asciidoc +++ b/version/docs/version.asciidoc @@ -1,6 +1,6 @@ :stack-version: 8.3.0 :doc-branch: main -:go-version: 1.17.12 +:go-version: 1.18.5 :release-state: unreleased :python: 3.7 :docker: 1.12 From d7f1588aa33bcad39cd345ac51c4bd19c481797d Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Thu, 11 Aug 2022 01:36:45 -0400 Subject: [PATCH 082/180] [Automation] Update elastic stack version to 8.5.0-60a4c029 for testing (#899) Co-authored-by: apmmachine --- testing/environments/snapshot.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/testing/environments/snapshot.yml b/testing/environments/snapshot.yml index 46eb3c31168..664c2dd7ec4 100644 --- a/testing/environments/snapshot.yml +++ b/testing/environments/snapshot.yml @@ -3,7 +3,7 @@ version: '2.3' services: elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:8.5.0-41aadc32-SNAPSHOT + image: docker.elastic.co/elasticsearch/elasticsearch:8.5.0-60a4c029-SNAPSHOT #
When extend is used it merges healthcheck.tests, see: # https://github.com/docker/compose/issues/8962 # healthcheck: @@ -42,7 +42,7 @@ services: - ./docker/logstash/pki:/etc/pki:ro kibana: - image: docker.elastic.co/kibana/kibana:8.5.0-41aadc32-SNAPSHOT + image: docker.elastic.co/kibana/kibana:8.5.0-60a4c029-SNAPSHOT environment: - "ELASTICSEARCH_USERNAME=kibana_system_user" - "ELASTICSEARCH_PASSWORD=testing" From 689aee33c923be6183267c0e45596aa8d758e6eb Mon Sep 17 00:00:00 2001 From: Yash Tewari Date: Fri, 12 Aug 2022 00:00:35 +0530 Subject: [PATCH 083/180] Add control-plane toleration to Agent K8S manifests. (#864) * Add toleration to elastic-agent Kubernetes manifests. The toleration with key node-role.kubernetes.io/control-plane is set to replace the deprecated toleration with key node-role.kubernetes.io/master which will be removed by Kubernetes v1.25 * Remove outdated "master" node terminology. --- deploy/kubernetes/elastic-agent-managed-kubernetes.yaml | 6 ++++-- .../elastic-agent-managed-daemonset.yaml | 6 ++++-- deploy/kubernetes/elastic-agent-standalone-kubernetes.yaml | 6 ++++-- .../elastic-agent-standalone-daemonset.yaml | 6 ++++-- 4 files changed, 16 insertions(+), 8 deletions(-) diff --git a/deploy/kubernetes/elastic-agent-managed-kubernetes.yaml b/deploy/kubernetes/elastic-agent-managed-kubernetes.yaml index 1e2403f47a2..90727654ff7 100644 --- a/deploy/kubernetes/elastic-agent-managed-kubernetes.yaml +++ b/deploy/kubernetes/elastic-agent-managed-kubernetes.yaml @@ -15,9 +15,11 @@ spec: labels: app: elastic-agent spec: - # Tolerations are needed to run Elastic Agent on Kubernetes master nodes. - # Agents running on master nodes collect metrics from the control plane components (scheduler, controller manager) of Kubernetes + # Tolerations are needed to run Elastic Agent on Kubernetes control-plane nodes. + # Agents running on control-plane nodes collect metrics from the control plane components (scheduler, controller manager) of Kubernetes tolerations: + - key: node-role.kubernetes.io/control-plane + effect: NoSchedule - key: node-role.kubernetes.io/master effect: NoSchedule serviceAccountName: elastic-agent diff --git a/deploy/kubernetes/elastic-agent-managed/elastic-agent-managed-daemonset.yaml b/deploy/kubernetes/elastic-agent-managed/elastic-agent-managed-daemonset.yaml index c3c679efa36..fac245ca69a 100644 --- a/deploy/kubernetes/elastic-agent-managed/elastic-agent-managed-daemonset.yaml +++ b/deploy/kubernetes/elastic-agent-managed/elastic-agent-managed-daemonset.yaml @@ -15,9 +15,11 @@ spec: labels: app: elastic-agent spec: - # Tolerations are needed to run Elastic Agent on Kubernetes master nodes. - # Agents running on master nodes collect metrics from the control plane components (scheduler, controller manager) of Kubernetes + # Tolerations are needed to run Elastic Agent on Kubernetes control-plane nodes. 
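+ # Both the new control-plane toleration and the deprecated master toleration are kept, so scheduling keeps working on clusters before and after the Kubernetes v1.25 rename.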
+ # Agents running on control-plane nodes collect metrics from the control plane components (scheduler, controller manager) of Kubernetes tolerations: + - key: node-role.kubernetes.io/control-plane + effect: NoSchedule - key: node-role.kubernetes.io/master effect: NoSchedule serviceAccountName: elastic-agent diff --git a/deploy/kubernetes/elastic-agent-standalone-kubernetes.yaml b/deploy/kubernetes/elastic-agent-standalone-kubernetes.yaml index 0984f0dc8ac..9b2ffc3a017 100644 --- a/deploy/kubernetes/elastic-agent-standalone-kubernetes.yaml +++ b/deploy/kubernetes/elastic-agent-standalone-kubernetes.yaml @@ -640,9 +640,11 @@ spec: labels: app: elastic-agent-standalone spec: - # Tolerations are needed to run Elastic Agent on Kubernetes master nodes. - # Agents running on master nodes collect metrics from the control plane components (scheduler, controller manager) of Kubernetes + # Tolerations are needed to run Elastic Agent on Kubernetes control-plane nodes. + # Agents running on control-plane nodes collect metrics from the control plane components (scheduler, controller manager) of Kubernetes tolerations: + - key: node-role.kubernetes.io/control-plane + effect: NoSchedule - key: node-role.kubernetes.io/master effect: NoSchedule serviceAccountName: elastic-agent-standalone diff --git a/deploy/kubernetes/elastic-agent-standalone/elastic-agent-standalone-daemonset.yaml b/deploy/kubernetes/elastic-agent-standalone/elastic-agent-standalone-daemonset.yaml index 0bf131ec8ea..3205b13bf88 100644 --- a/deploy/kubernetes/elastic-agent-standalone/elastic-agent-standalone-daemonset.yaml +++ b/deploy/kubernetes/elastic-agent-standalone/elastic-agent-standalone-daemonset.yaml @@ -14,9 +14,11 @@ spec: labels: app: elastic-agent-standalone spec: - # Tolerations are needed to run Elastic Agent on Kubernetes master nodes. - # Agents running on master nodes collect metrics from the control plane components (scheduler, controller manager) of Kubernetes + # Tolerations are needed to run Elastic Agent on Kubernetes control-plane nodes. + # Agents running on control-plane nodes collect metrics from the control plane components (scheduler, controller manager) of Kubernetes tolerations: + - key: node-role.kubernetes.io/control-plane + effect: NoSchedule - key: node-role.kubernetes.io/master effect: NoSchedule serviceAccountName: elastic-agent-standalone From 1ebffe9334bc945510290b56df15efe6ad8220c4 Mon Sep 17 00:00:00 2001 From: Victor Martinez Date: Wed, 17 Aug 2022 13:10:06 +0100 Subject: [PATCH 084/180] install mage with go install (#936) --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 37022ff7d7d..19eca744b78 100644 --- a/Makefile +++ b/Makefile @@ -13,7 +13,7 @@ export MAGE_IMPORT_PATH mage: ifndef MAGE_PRESENT @echo Installing mage $(MAGE_VERSION). - @go get -ldflags="-X $(MAGE_IMPORT_PATH)/mage.gitTag=$(MAGE_VERSION)" ${MAGE_IMPORT_PATH}@$(MAGE_VERSION) + @go install ${MAGE_IMPORT_PATH}@$(MAGE_VERSION) @-mage -clean endif @true From d4f33d00d320d2030826abffc89e497aa3b99bcf Mon Sep 17 00:00:00 2001 From: Andrew Gizas Date: Wed, 17 Aug 2022 16:41:44 +0300 Subject: [PATCH 085/180] Cloudnative ci automation (#837) This commit provides the relevant Jenkins CI automation to open pull requests to the kibana GitHub repository in order to keep the Cloud-Native team's manifests in sync with the manifests that are used in the Fleet UI.
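Roughly, the new stage runs the following sequence (a simplified sketch; the exact steps are the ones wired up in .ci/Jenkinsfile and deploy/kubernetes/Makefile below, and every command name is taken from this patch):

  WITHOUTCONFIG=true make generate-k8s   # render the *-kubernetes-without-configmap.yaml manifests
  ./creator_k8s_manifest.sh .            # bundle them into elastic_agent_manifest.ts
  make ci-clone-kibana-repository        # clone elastic/kibana and copy the generated file in place
  cp Makefile ./kibana
  cd kibana
  make ci-create-kubernetes-templates-pull-request   # branch, commit, and open the PR via gh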
For full information check #706. Updated .ci/Jenkinsfile so that it is triggered on PRs that change /elastic-agent/deploy/kubernetes/*. Updated the Makefile to add the functionality needed to create the extra files for the new PRs to the remote kibana repository. --- .ci/Jenkinsfile | 38 ++++++++++ deploy/kubernetes/Makefile | 71 +++++++++++++++++-- deploy/kubernetes/creator_k8s_manifest.sh | 58 +++++++++++++++ .../elastic-agent-managed-kubernetes.yaml | 2 +- .../elastic-agent-managed-daemonset.yaml | 2 +- .../elastic-agent-standalone-kubernetes.yaml | 5 +- ...-agent-standalone-daemonset-configmap.yaml | 2 +- .../elastic-agent-standalone-daemonset.yaml | 3 +- 8 files changed, 169 insertions(+), 12 deletions(-) create mode 100755 deploy/kubernetes/creator_k8s_manifest.sh diff --git a/.ci/Jenkinsfile b/.ci/Jenkinsfile index 78078f79358..764e13952b1 100644 --- a/.ci/Jenkinsfile +++ b/.ci/Jenkinsfile @@ -238,6 +238,44 @@ pipeline { } } } + stage('Sync K8s') { // This stage opens a PR to the kibana repository in order to sync k8s manifests + when { + // Only on main branch + // Enable if k8s related changes. + allOf { + branch 'main' // Only runs for branch main + expression { return env.K8S_CHANGES == "true" } // If k8s changes + } + } + failFast false + agent {label 'ubuntu-20.04 && immutable'} + options { skipDefaultCheckout() } + stages { + stage('OpenKibanaPR') { + steps { + withGhEnv(version: '2.4.0') { + deleteDir() + unstashV2(name: 'source', bucket: "${JOB_GCS_BUCKET}", credentialsId: "${JOB_GCS_CREDENTIALS}") + dir("${BASE_DIR}/deploy/kubernetes"){ + sh(label: '[File Creation] Create-Needed-Manifest', script: """ + WITHOUTCONFIG=true make generate-k8s + ./creator_k8s_manifest.sh . """) + sh(label: '[Clone] Kibana-Repository', script: """ + make ci-clone-kibana-repository + cp Makefile ./kibana + cd kibana + make ci-create-kubernetes-templates-pull-request """) + } + } + } + post { + always { + junit(allowEmptyResults: true, keepLongStdio: true, testResults: "${BASE_DIR}/build/TEST-*.xml") + } + } + } + } + } stage('e2e tests') { when { // Always when running builds on branches/tags diff --git a/deploy/kubernetes/Makefile b/deploy/kubernetes/Makefile index 35745dcec31..42bb611fa40 100644 --- a/deploy/kubernetes/Makefile +++ b/deploy/kubernetes/Makefile @@ -1,10 +1,15 @@ ALL=elastic-agent-standalone elastic-agent-managed BEAT_VERSION=$(shell head -n 1 ../../version/docs/version.asciidoc | cut -c 17- ) -.PHONY: generate-k8s $(ALL) +#variables needed for ci-create-kubernetes-templates-pull-request +ELASTIC_AGENT_REPO=kibana +ELASTIC_AGENT_REPO_PATH=x-pack/plugins/fleet/server/services/ +FILE_REPO=elastic_agent_manifest.ts +ELASTIC_AGENT_BRANCH=update-k8s-templates-$(shell date "+%Y%m%d%H%M%S") +.PHONY: generate-k8s $(ALL) generate-k8s: $(ALL) - + test: generate-k8s for FILE in $(shell ls *-kubernetes.yaml); do \ BEAT=$$(echo $$FILE | cut -d \- -f 1); \ @@ -14,10 +19,64 @@ test: generate-k8s clean: @for f in $(ALL); do rm -f "$$f-kubernetes.yaml"; done -$(ALL): - @echo "Generating $@-kubernetes.yaml" - @rm -f $@-kubernetes.yaml +$(ALL): +ifdef WITHOUTCONFIG + @echo "Generating $@-kubernetes-without-configmap.yaml" + @rm -f $@-kubernetes-without-configmap.yaml + @for f in $(shell ls $@/*.yaml | grep -v daemonset-configmap); do \ + sed "s/%VERSION%/VERSION/g" $$f >> $@-kubernetes-without-configmap.yaml; \ + echo --- >> $@-kubernetes-without-configmap.yaml; \ + done +else + @echo "Generating $@-kubernetes.yaml" + @rm -f $@-kubernetes.yaml @for f in $(shell ls $@/*.yaml); do \ sed
"s/%VERSION%/${BEAT_VERSION}/g" $$f >> $@-kubernetes.yaml; \ echo --- >> $@-kubernetes.yaml; \ - done + done +endif + +CHDIR_SHELL := $(SHELL) +define chdir + $(eval _D=$(firstword $(1) $(@D))) + $(info $(MAKE): cd $(_D)) $(eval SHELL = cd $(_D); $(CHDIR_SHELL)) +endef + +## ci-clone-kibana-repository : Clone Kibana Repository and copy new files for the PR +.PHONY: ci-clone-kibana-repository +ci-clone-kibana-repository: + git clone git@github.com:elastic/kibana.git + cp $(FILE_REPO) $(ELASTIC_AGENT_REPO)/$(ELASTIC_AGENT_REPO_PATH) + +## ci-create-kubernetes-templates-pull-request : Create the pull request for the kubernetes templates +.PHONY: ci-create-kubernetes-templates-pull-request +ci-create-kubernetes-templates-pull-request: + HASDIFF=$$(git status | grep $(FILE_REPO) | wc -l); \ + if [ $${HASDIFF} -ne 1 ]; \ + then \ + echo "No differences found with kibana git repository" && \ + exit 1; \ + fi + echo "INFO: Create branch to update k8s templates" + git checkout -b $(ELASTIC_AGENT_BRANCH) + echo "INFO: add files if any" + git add $(ELASTIC_AGENT_REPO_PATH)$(FILE_REPO) + echo "INFO: commit changes if any" + git diff --staged --quiet || git commit -m "[Automated PR] Publish kubernetes templates for elastic-agent" + echo "INFO: show remote details" + git remote -v +ifeq ($(DRY_RUN),TRUE) + echo "INFO: skip pushing branch" +else + echo "INFO: push branch" + @git push --set-upstream origin $(ELASTIC_AGENT_BRANCH) + echo "INFO: create pull request" + @gh pr create \ + --title "Update kubernetes templates for elastic-agent" \ + --body "Automated by ${BUILD_URL}" \ + --label automation \ + --base main \ + --head $(ELASTIC_AGENT_BRANCH) \ + --reviewer elastic/obs-cloudnative-monitoring +endif + diff --git a/deploy/kubernetes/creator_k8s_manifest.sh b/deploy/kubernetes/creator_k8s_manifest.sh new file mode 100755 index 00000000000..e162613f498 --- /dev/null +++ b/deploy/kubernetes/creator_k8s_manifest.sh @@ -0,0 +1,58 @@ +#!/bin/bash +#### +# Bash Script that creates the needed https://github.com/elastic/kibana/blob/main/x-pack/plugins/fleet/server/services/elastic_agent_manifest.ts +# The script takes as an argument the path of elastic-agent manifests +# Eg. ./creator_k8s_manifest.sh deploy/kubernetes +#### + + +STANDALONE=elastic-agent-standalone-kubernetes-without-configmap.yaml +MANAGED=elastic-agent-managed-kubernetes-without-configmap.yaml +OUTPUT_FILE=elastic_agent_manifest.ts + +#Check if arguments provided +((!$#)) && echo "No arguments provided!Please provide path of elastic-agent files" && exit 1 +MANIFEST_PATH=$1 + +#Check if file elastic-agent-standalone-kubernetes-without-configmap.yaml exists +if [ ! -f "$MANIFEST_PATH/$STANDALONE" ]; then + echo "$MANIFEST_PATH/$STANDALONE does not exists" + exit 1 +fi + +#Check if file elastic-agent-managed-kubernetes-without-configmap.yaml exists +if [ ! -f "$MANIFEST_PATH/$MANAGED" ]; then + echo "$MANIFEST_PATH/$MANAGED does not exists" + exit 1 +fi + +#Start creation of output file +cat << EOF > $OUTPUT_FILE +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +export const elasticAgentStandaloneManifest = \`--- +EOF + +cat $MANIFEST_PATH/$STANDALONE >> $OUTPUT_FILE +echo "\`;" >> $OUTPUT_FILE + +cat << EOF >> $OUTPUT_FILE + +export const elasticAgentManagedManifest = \`--- +EOF + +cat $MANIFEST_PATH/$MANAGED >> $OUTPUT_FILE +echo -n "\`;" >> $OUTPUT_FILE + +#Replacing all occurrences of elastic-agent-standalone +sed -i -e 's/elastic-agent-standalone/elastic-agent/g' $OUTPUT_FILE + +#Remove ES_HOST entry from file +sed -i -e '/# The Elasticsearch host to communicate with/d' $OUTPUT_FILE +sed -i -e '/ES_HOST/d' $OUTPUT_FILE +sed -i -e '/value: ""/d' $OUTPUT_FILE \ No newline at end of file diff --git a/deploy/kubernetes/elastic-agent-managed-kubernetes.yaml b/deploy/kubernetes/elastic-agent-managed-kubernetes.yaml index 90727654ff7..acb8f8d5ea2 100644 --- a/deploy/kubernetes/elastic-agent-managed-kubernetes.yaml +++ b/deploy/kubernetes/elastic-agent-managed-kubernetes.yaml @@ -45,7 +45,7 @@ spec: # Elasticsearch API key used to enroll Elastic Agents in Fleet (https://www.elastic.co/guide/en/fleet/current/fleet-enrollment-tokens.html#fleet-enrollment-tokens) # If FLEET_ENROLLMENT_TOKEN is empty then KIBANA_HOST, KIBANA_FLEET_USERNAME, KIBANA_FLEET_PASSWORD are needed - name: FLEET_ENROLLMENT_TOKEN - value: "" + value: "token-id" - name: KIBANA_HOST value: "http://kibana:5601" # The basic authentication username used to connect to Kibana and retrieve a service_token to enable Fleet diff --git a/deploy/kubernetes/elastic-agent-managed/elastic-agent-managed-daemonset.yaml b/deploy/kubernetes/elastic-agent-managed/elastic-agent-managed-daemonset.yaml index fac245ca69a..878b15b8a6e 100644 --- a/deploy/kubernetes/elastic-agent-managed/elastic-agent-managed-daemonset.yaml +++ b/deploy/kubernetes/elastic-agent-managed/elastic-agent-managed-daemonset.yaml @@ -45,7 +45,7 @@ spec: # Elasticsearch API key used to enroll Elastic Agents in Fleet (https://www.elastic.co/guide/en/fleet/current/fleet-enrollment-tokens.html#fleet-enrollment-tokens) # If FLEET_ENROLLMENT_TOKEN is empty then KIBANA_HOST, KIBANA_FLEET_USERNAME, KIBANA_FLEET_PASSWORD are needed - name: FLEET_ENROLLMENT_TOKEN - value: "" + value: "token-id" - name: KIBANA_HOST value: "http://kibana:5601" # The basic authentication username used to connect to Kibana and retrieve a service_token to enable Fleet diff --git a/deploy/kubernetes/elastic-agent-standalone-kubernetes.yaml b/deploy/kubernetes/elastic-agent-standalone-kubernetes.yaml index 9b2ffc3a017..dc283ce40b8 100644 --- a/deploy/kubernetes/elastic-agent-standalone-kubernetes.yaml +++ b/deploy/kubernetes/elastic-agent-standalone-kubernetes.yaml @@ -1,4 +1,4 @@ -# For more information refer to https://www.elastic.co/guide/en/fleet/current/running-on-kubernetes-standalone.html +# For more information refer https://www.elastic.co/guide/en/fleet/current/running-on-kubernetes-standalone.html apiVersion: v1 kind: ConfigMap metadata: @@ -624,6 +624,7 @@ data: # period: 10s # condition: ${kubernetes.labels.app} == 'redis' --- +# For more information refer to https://www.elastic.co/guide/en/fleet/current/running-on-kubernetes-standalone.html apiVersion: apps/v1 kind: DaemonSet metadata: @@ -664,7 +665,7 @@ spec: value: "elastic" # The basic authentication password used to connect to Elasticsearch - name: ES_PASSWORD - value: "" + value: "changeme" # The Elasticsearch host to communicate with - name: ES_HOST value: "" diff --git a/deploy/kubernetes/elastic-agent-standalone/elastic-agent-standalone-daemonset-configmap.yaml
b/deploy/kubernetes/elastic-agent-standalone/elastic-agent-standalone-daemonset-configmap.yaml index 7048bf22adb..1e42f94af15 100644 --- a/deploy/kubernetes/elastic-agent-standalone/elastic-agent-standalone-daemonset-configmap.yaml +++ b/deploy/kubernetes/elastic-agent-standalone/elastic-agent-standalone-daemonset-configmap.yaml @@ -1,4 +1,4 @@ -# For more information refer to https://www.elastic.co/guide/en/fleet/current/running-on-kubernetes-standalone.html +# For more information refer https://www.elastic.co/guide/en/fleet/current/running-on-kubernetes-standalone.html apiVersion: v1 kind: ConfigMap metadata: diff --git a/deploy/kubernetes/elastic-agent-standalone/elastic-agent-standalone-daemonset.yaml b/deploy/kubernetes/elastic-agent-standalone/elastic-agent-standalone-daemonset.yaml index 3205b13bf88..59d9b318543 100644 --- a/deploy/kubernetes/elastic-agent-standalone/elastic-agent-standalone-daemonset.yaml +++ b/deploy/kubernetes/elastic-agent-standalone/elastic-agent-standalone-daemonset.yaml @@ -1,3 +1,4 @@ +# For more information refer to https://www.elastic.co/guide/en/fleet/current/running-on-kubernetes-standalone.html apiVersion: apps/v1 kind: DaemonSet metadata: @@ -38,7 +39,7 @@ spec: value: "elastic" # The basic authentication password used to connect to Elasticsearch - name: ES_PASSWORD - value: "" + value: "changeme" # The Elasticsearch host to communicate with - name: ES_HOST value: "" From b27a90780fdd0566c0575efe5bccc98289513b7f Mon Sep 17 00:00:00 2001 From: Florian Lehner Date: Wed, 17 Aug 2022 15:49:18 +0200 Subject: [PATCH 086/180] Reduce memory footprint by reordering struct elements (#804) * Reduce memory footprint by reordering struct elements * rename struct element for linter Signed-off-by: Florian Lehner Signed-off-by: Florian Lehner --- .../handlers/handler_action_unenroll.go | 5 +- .../pkg/agent/application/upgrade/upgrade.go | 10 ++-- .../agent/operation/operation_retryable.go | 2 +- internal/pkg/agent/stateresolver/resolve.go | 4 +- .../composable/providers/kubernetes/pod.go | 12 ++--- .../kubernetesleaderelection/config.go | 6 ++- internal/pkg/core/plugin/process/app.go | 53 +++++++++---------- internal/pkg/core/state/state.go | 4 +- internal/pkg/core/status/reporter.go | 28 +++++----- internal/pkg/crypto/io.go | 8 +-- .../pkg/fleetapi/acker/retrier/retrier.go | 15 +++--- internal/pkg/reporter/fleet/reporter.go | 10 ++-- 12 files changed, 79 insertions(+), 78 deletions(-) diff --git a/internal/pkg/agent/application/pipeline/actions/handlers/handler_action_unenroll.go b/internal/pkg/agent/application/pipeline/actions/handlers/handler_action_unenroll.go index 71fe0f30644..8abf094ee37 100644 --- a/internal/pkg/agent/application/pipeline/actions/handlers/handler_action_unenroll.go +++ b/internal/pkg/agent/application/pipeline/actions/handlers/handler_action_unenroll.go @@ -26,11 +26,11 @@ type stateStore interface { // Unenroll results in running agent entering idle state, non managed non standalone. // For it to be operational again it needs to be either enrolled or reconfigured. type Unenroll struct { + dispatcher pipeline.Router + stateStore stateStore log *logger.Logger emitter pipeline.EmitterFunc - dispatcher pipeline.Router closers []context.CancelFunc - stateStore stateStore } // NewUnenroll creates a new Unenroll handler. 
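// Illustrative aside, not part of this patch: a minimal, self-contained sketch of why
// reordering fields shrinks Go structs. The compiler pads every field to its alignment,
// so interleaving small and large fields leaves padding holes; grouping the larger
// fields first (as the hunks in this commit do) removes them. The types and field
// names below are made up for the example; sizes assume a 64-bit platform.
package main

import (
	"fmt"
	"unsafe"
)

// interleaved wastes space: bool(1) + pad(7) + int64(8) + bool(1) + pad(7) = 24 bytes.
type interleaved struct {
	a bool
	b int64
	c bool
}

// grouped packs the same fields into int64(8) + bool(1) + bool(1) + pad(6) = 16 bytes.
type grouped struct {
	b int64
	a bool
	c bool
}

func main() {
	// Prints "24 16" on amd64.
	fmt.Println(unsafe.Sizeof(interleaved{}), unsafe.Sizeof(grouped{}))
}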
@@ -75,6 +75,7 @@ func (h *Unenroll) Handle(ctx context.Context, a fleetapi.Action, acker store.Fl } else if h.stateStore != nil { // backup action for future start to avoid starting fleet gateway loop h.stateStore.Add(a) + // nolint: errcheck // Ignore the error at this point. h.stateStore.Save() } diff --git a/internal/pkg/agent/application/upgrade/upgrade.go b/internal/pkg/agent/application/upgrade/upgrade.go index ce811036176..1c6a85fa9d9 100644 --- a/internal/pkg/agent/application/upgrade/upgrade.go +++ b/internal/pkg/agent/application/upgrade/upgrade.go @@ -44,15 +44,15 @@ var ( // Upgrader performs an upgrade type Upgrader struct { - agentInfo *info.AgentInfo + reporter stateReporter + caps capabilities.Capability + reexec reexecManager + acker acker settings *artifact.Config + agentInfo *info.AgentInfo log *logger.Logger closers []context.CancelFunc - reexec reexecManager - acker acker - reporter stateReporter upgradeable bool - caps capabilities.Capability } // Action is the upgrade action state. diff --git a/internal/pkg/agent/operation/operation_retryable.go b/internal/pkg/agent/operation/operation_retryable.go index b30fd68563c..53544cec92b 100644 --- a/internal/pkg/agent/operation/operation_retryable.go +++ b/internal/pkg/agent/operation/operation_retryable.go @@ -18,8 +18,8 @@ import ( // if nth operation fails all preceding are retried as well type retryableOperations struct { logger *logger.Logger - operations []operation retryConfig *retry.Config + operations []operation } func newRetryableOperations( diff --git a/internal/pkg/agent/stateresolver/resolve.go b/internal/pkg/agent/stateresolver/resolve.go index 5afe2256cb6..526ad8befa3 100644 --- a/internal/pkg/agent/stateresolver/resolve.go +++ b/internal/pkg/agent/stateresolver/resolve.go @@ -60,10 +60,10 @@ func (s *state) String() string { } type active struct { - LastChange stateChange + Program program.Program LastModified time.Time Identifier string - Program program.Program + LastChange stateChange } func (s *active) String() string { diff --git a/internal/pkg/composable/providers/kubernetes/pod.go b/internal/pkg/composable/providers/kubernetes/pod.go index a8b11b06585..034df3c7a72 100644 --- a/internal/pkg/composable/providers/kubernetes/pod.go +++ b/internal/pkg/composable/providers/kubernetes/pod.go @@ -23,15 +23,15 @@ import ( ) type pod struct { - logger *logp.Logger - cleanupTimeout time.Duration - comm composable.DynamicProviderComm - scope string - config *Config - metagen metadata.MetaGen watcher kubernetes.Watcher nodeWatcher kubernetes.Watcher + comm composable.DynamicProviderComm + metagen metadata.MetaGen namespaceWatcher kubernetes.Watcher + config *Config + logger *logp.Logger + scope string + cleanupTimeout time.Duration // Mutex used by configuration updates not triggered by the main watcher, // to avoid race conditions between cross updates and deletions. 
diff --git a/internal/pkg/composable/providers/kubernetesleaderelection/config.go b/internal/pkg/composable/providers/kubernetesleaderelection/config.go index d92d35566a2..7ccc2f9a799 100644 --- a/internal/pkg/composable/providers/kubernetesleaderelection/config.go +++ b/internal/pkg/composable/providers/kubernetesleaderelection/config.go @@ -8,10 +8,12 @@ import "github.com/elastic/elastic-agent-autodiscover/kubernetes" // Config for kubernetes_leaderelection provider type Config struct { - KubeConfig string `config:"kube_config"` - KubeClientOptions kubernetes.KubeClientOptions `config:"kube_client_options"` + KubeConfig string `config:"kube_config"` + // Name of the leaderelection lease LeaderLease string `config:"leader_lease"` + + KubeClientOptions kubernetes.KubeClientOptions `config:"kube_client_options"` } // InitDefaults initializes the default values for the config. diff --git a/internal/pkg/core/plugin/process/app.go b/internal/pkg/core/plugin/process/app.go index acb38ee92df..3e2778674e9 100644 --- a/internal/pkg/core/plugin/process/app.go +++ b/internal/pkg/core/plugin/process/app.go @@ -35,34 +35,32 @@ var ( // Application encapsulates a concrete application ran by elastic-agent e.g Beat. type Application struct { - bgContext context.Context - id string - name string - pipelineID string - logLevel string - desc *app.Descriptor - srv *server.Server - srvState *server.ApplicationState - limiter *tokenbucket.Bucket - startContext context.Context - tag app.Taggable - state state.State - reporter state.Reporter - watchClosers map[int]context.CancelFunc + state state.State + startContext context.Context + statusReporter status.Reporter + monitor monitoring.Monitor + reporter state.Reporter + tag app.Taggable + bgContext context.Context + srvState *server.ApplicationState + limiter *tokenbucket.Bucket + srv *server.Server + desc *app.Descriptor + restartCanceller context.CancelFunc + logger *logger.Logger + watchClosers map[int]context.CancelFunc + processConfig *process.Config + restartConfig map[string]interface{} + + name string + id string + pipelineID string + logLevel string uid int gid int - monitor monitoring.Monitor - statusReporter status.Reporter - - processConfig *process.Config - - logger *logger.Logger - - appLock sync.Mutex - restartCanceller context.CancelFunc - restartConfig map[string]interface{} + appLock sync.Mutex } // ArgsDecorator decorates arguments before calling an application @@ -79,8 +77,8 @@ func NewApplication( logger *logger.Logger, reporter state.Reporter, monitor monitoring.Monitor, - statusController status.Controller) (*Application, error) { - + statusController status.Controller, +) (*Application, error) { s := desc.ProcessSpec() uid, gid, err := s.UserGroup() if err != nil { @@ -157,7 +155,6 @@ func (a *Application) Stop() { a.logger.Error(err) } - } a.appLock.Lock() @@ -231,6 +228,7 @@ func (a *Application) watch(ctx context.Context, p app.Taggable, proc *process.I a.setState(state.Restarting, msg, nil) // it was a crash + // nolint: errcheck // Ignore the error at this point. a.start(ctx, p, cfg, true) }() } @@ -277,6 +275,7 @@ func (a *Application) setState(s state.Status, msg string, payload map[string]in } func (a *Application) cleanUp() { + // nolint: errcheck // Ignore the error at this point. 
a.monitor.Cleanup(a.desc.Spec(), a.pipelineID) } diff --git a/internal/pkg/core/state/state.go b/internal/pkg/core/state/state.go index 080efb42c88..a15a8ba4c3c 100644 --- a/internal/pkg/core/state/state.go +++ b/internal/pkg/core/state/state.go @@ -76,9 +76,9 @@ func FromProto(s proto.StateObserved_Status) Status { // State wraps the process state and application status. type State struct { ProcessInfo *process.Info - Status Status - Message string Payload map[string]interface{} + Message string + Status Status } // Reporter is interface that is called when a state is changed. diff --git a/internal/pkg/core/status/reporter.go b/internal/pkg/core/status/reporter.go index 92632af2ed5..9e0f47a9b56 100644 --- a/internal/pkg/core/status/reporter.go +++ b/internal/pkg/core/status/reporter.go @@ -39,19 +39,19 @@ func (s AgentStatusCode) String() string { // AgentApplicationStatus returns the status of specific application. type AgentApplicationStatus struct { + Payload map[string]interface{} ID string Name string - Status state.Status Message string - Payload map[string]interface{} + Status state.Status } // AgentStatus returns the overall status of the Elastic Agent. type AgentStatus struct { - Status AgentStatusCode + UpdateTime time.Time Message string Applications []AgentApplicationStatus - UpdateTime time.Time + Status AgentStatusCode } // Controller takes track of component statuses. @@ -68,15 +68,15 @@ type Controller interface { } type controller struct { - mx sync.Mutex - status AgentStatusCode - message string updateTime time.Time + log *logger.Logger reporters map[string]*reporter appReporters map[string]*reporter - log *logger.Logger stateID string + message string agentID string + status AgentStatusCode + mx sync.Mutex } // NewController creates a new reporter. @@ -272,15 +272,15 @@ type Reporter interface { } type reporter struct { - name string - mx sync.Mutex - isPersistent bool - isRegistered bool - status state.Status - message string payload map[string]interface{} unregisterFunc func() notifyChangeFunc func() + message string + name string + status state.Status + mx sync.Mutex + isRegistered bool + isPersistent bool } // Update updates the status of a component. diff --git a/internal/pkg/crypto/io.go b/internal/pkg/crypto/io.go index 738a216774a..2012bdf1b5c 100644 --- a/internal/pkg/crypto/io.go +++ b/internal/pkg/crypto/io.go @@ -21,11 +21,11 @@ import ( // Option is the default options used to generate the encrypt and decrypt writer. // NOTE: the defined options need to be same for both the Reader and the writer. type Option struct { + Generator bytesGen IterationsCount int KeyLength int SaltLength int IVLength int - Generator bytesGen // BlockSize must be a factor of aes.BlockSize BlockSize int @@ -180,7 +180,6 @@ func (w *Writer) Write(b []byte) (int, error) { } func (w *Writer) writeBlock(b []byte) error { - // randomly generate the salt and the initialization vector, this information will be saved // on disk in the file as part of the header iv, err := w.generator(w.option.IVLength) @@ -189,12 +188,14 @@ func (w *Writer) writeBlock(b []byte) error { return w.err } + // nolint: errcheck // Ignore the error at this point. w.writer.Write(iv) encodedBytes := w.gcm.Seal(nil, iv, b, nil) l := make([]byte, 4) binary.LittleEndian.PutUint32(l, uint32(len(encodedBytes))) + // nolint: errcheck // Ignore the error at this point. 
w.writer.Write(l) _, err = w.writer.Write(encodedBytes) @@ -325,7 +326,7 @@ func (r *Reader) consumeBlock() error { } encodedBytes := make([]byte, l) - _, err = io.ReadAtLeast(r.reader, encodedBytes, int(l)) + _, err = io.ReadAtLeast(r.reader, encodedBytes, l) if err != nil { r.err = errors.Wrapf(err, "fail read the block of %d bytes", l) } @@ -364,7 +365,6 @@ func (r *Reader) Close() error { func randomBytes(length int) ([]byte, error) { r := make([]byte, length) _, err := rand.Read(r) - if err != nil { return nil, err } diff --git a/internal/pkg/fleetapi/acker/retrier/retrier.go b/internal/pkg/fleetapi/acker/retrier/retrier.go index 406d6570611..38961cc1803 100644 --- a/internal/pkg/fleetapi/acker/retrier/retrier.go +++ b/internal/pkg/fleetapi/acker/retrier/retrier.go @@ -32,19 +32,19 @@ type Option func(*Retrier) // Retrier implements retrier for actions acks type Retrier struct { - log *logger.Logger acker BatchAcker // AckBatch provider + acker BatchAcker // AckBatch provider + log *logger.Logger - initialRetryInterval time.Duration // initial retry interval - maxRetryInterval time.Duration // max retry interval - maxRetries int // configurable maxNumber of retries per action + doneCh chan struct{} // signal channel when retry loop is done + kickCh chan struct{} // signal channel to kickoff retry loop if not running actions []fleetapi.Action // pending actions - mx sync.Mutex - kickCh chan struct{} // signal channel to kickoff retry loop if not running + maxRetryInterval time.Duration // max retry interval + maxRetries int // configurable maxNumber of retries per action + initialRetryInterval time.Duration // initial retry interval - doneCh chan struct{} // signal channel when retry loop is done + mx sync.Mutex } // New creates new instance of retrier @@ -173,7 +173,6 @@ func (r *Retrier) runRetries(ctx context.Context) { default: } r.log.Debug("ack retrier: exit retry loop") - } func (r *Retrier) updateRetriesMap(retries map[string]int, actions []fleetapi.Action, resp *fleetapi.AckResponse) (failed []fleetapi.Action) { diff --git a/internal/pkg/reporter/fleet/reporter.go b/internal/pkg/reporter/fleet/reporter.go index d334a9b45cd..edf5008bc01 100644 --- a/internal/pkg/reporter/fleet/reporter.go +++ b/internal/pkg/reporter/fleet/reporter.go @@ -18,7 +18,7 @@ import ( type event struct { AgentID string `json:"agent_id"` EventType string `json:"type"` - Ts fleetapi.Time `json:"timestamp"` + TS fleetapi.Time `json:"timestamp"` SubType string `json:"subtype"` Msg string `json:"message"` Payload map[string]interface{} `json:"payload,omitempty"` @@ -29,7 +29,7 @@ func (e *event) Type() string { } func (e *event) Timestamp() time.Time { - return time.Time(e.Ts) + return time.Time(e.TS) } func (e *event) Message() string { @@ -38,12 +38,12 @@ func (e *event) Message() string { // Reporter is a reporter without any effects, serves just as a showcase for further implementations.
type Reporter struct { + lastAck time.Time info agentInfo logger *logger.Logger queue []fleetapi.SerializableEvent - qlock sync.Mutex threshold int - lastAck time.Time + qlock sync.Mutex } type agentInfo interface { @@ -70,7 +70,7 @@ func (r *Reporter) Report(ctx context.Context, e reporter.Event) error { r.queue = append(r.queue, &event{ AgentID: r.info.AgentID(), EventType: e.Type(), - Ts: fleetapi.Time(e.Time()), + TS: fleetapi.Time(e.Time()), SubType: e.SubType(), Msg: e.Message(), Payload: e.Payload(), From 5316967696ee2db440264ef84800035af05da1f1 Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Thu, 18 Aug 2022 01:34:27 -0400 Subject: [PATCH 087/180] [Automation] Update elastic stack version to 8.5.0-6b9f92c0 for testing (#948) Co-authored-by: apmmachine --- testing/environments/snapshot.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/testing/environments/snapshot.yml b/testing/environments/snapshot.yml index 664c2dd7ec4..830eaf17846 100644 --- a/testing/environments/snapshot.yml +++ b/testing/environments/snapshot.yml @@ -3,7 +3,7 @@ version: '2.3' services: elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:8.5.0-60a4c029-SNAPSHOT + image: docker.elastic.co/elasticsearch/elasticsearch:8.5.0-6b9f92c0-SNAPSHOT # When extend is used it merges healthcheck.tests, see: # https://github.com/docker/compose/issues/8962 # healthcheck: @@ -42,7 +42,7 @@ services: - ./docker/logstash/pki:/etc/pki:ro kibana: - image: docker.elastic.co/kibana/kibana:8.5.0-60a4c029-SNAPSHOT + image: docker.elastic.co/kibana/kibana:8.5.0-6b9f92c0-SNAPSHOT environment: - "ELASTICSEARCH_USERNAME=kibana_system_user" - "ELASTICSEARCH_PASSWORD=testing" From 4eff4f1bc777ebb4423d0801154867a79221e156 Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Mon, 22 Aug 2022 01:36:02 -0400 Subject: [PATCH 088/180] [Automation] Update elastic stack version to 8.5.0-0616acda for testing (#963) Co-authored-by: apmmachine --- testing/environments/snapshot.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/testing/environments/snapshot.yml b/testing/environments/snapshot.yml index 830eaf17846..59f8b53298b 100644 --- a/testing/environments/snapshot.yml +++ b/testing/environments/snapshot.yml @@ -3,7 +3,7 @@ version: '2.3' services: elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:8.5.0-6b9f92c0-SNAPSHOT + image: docker.elastic.co/elasticsearch/elasticsearch:8.5.0-0616acda-SNAPSHOT # When extend is used it merges healthcheck.tests, see: # https://github.com/docker/compose/issues/8962 # healthcheck: @@ -42,7 +42,7 @@ services: - ./docker/logstash/pki:/etc/pki:ro kibana: - image: docker.elastic.co/kibana/kibana:8.5.0-6b9f92c0-SNAPSHOT + image: docker.elastic.co/kibana/kibana:8.5.0-0616acda-SNAPSHOT environment: - "ELASTICSEARCH_USERNAME=kibana_system_user" - "ELASTICSEARCH_PASSWORD=testing" From 4fffa70f01eac446c02a5ea6fcfc962edd07fa8a Mon Sep 17 00:00:00 2001 From: Quentin Pradet Date: Mon, 22 Aug 2022 17:04:25 +0400 Subject: [PATCH 089/180] Clarify that this repo is not only docs (#969) --- README.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 2c0dbe31f69..faecad7b707 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,6 @@ -# Elastic Agent developer docs +# Elastic Agent + +## Developer docs The source files for the general Elastic Agent documentation are currently stored in the 
[observability-docs](https://github.com/elastic/observability-docs) repo. The following docs are only focused on getting developers started building code for Elastic Agent. From c0367b97a255484c3414995f32cefe6de3a12f17 Mon Sep 17 00:00:00 2001 From: Andrew Kroh Date: Mon, 22 Aug 2022 13:19:46 -0400 Subject: [PATCH 090/180] Add Filebeat lumberjack input to spec (#959) Make the lumberjack input available from Agent. Relates: https://github.com/elastic/beats/pull/32175 --- CHANGELOG.next.asciidoc | 1 + internal/pkg/agent/program/supported.go | 2 +- internal/spec/filebeat.yml | 1 + 3 files changed, 3 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 9f35f0dffcd..6f7715bdebf 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -198,3 +198,4 @@ - Add liveness endpoint, allow fleet-gateway component to report degraded state, add update time and messages to status output. {issue}390[390] {pull}569[569] - Redact sensitive information on diagnostics collect command. {issue}[241] {pull}[566] - Fix incorrectly creating a filebeat redis input when a policy contains a packetbeat redis input. {issue}[427] {pull}[700] +- Add `lumberjack` input type to the Filebeat spec. {pull}[959] diff --git a/internal/pkg/agent/program/supported.go b/internal/pkg/agent/program/supported.go index c6d78b20f84..52685137b97 100644 --- a/internal/pkg/agent/program/supported.go +++ b/internal/pkg/agent/program/supported.go @@ -27,7 +27,7 @@ func init() { // internal/spec/metricbeat.yml // internal/spec/osquerybeat.yml // internal/spec/packetbeat.yml - unpacked := packer.MustUnpack("eJzce1uTqkiX9v33M/r2m5nmUFZvJuK9EKpA0KJa3JJJ3pGZFqgJ2iWoODH/fSKTg4DUsXv6fWMuKmLvFPKwch2e9azFf/2yTrPVaxqyXw/7Ffk13Cf/fli9Hlev/1Ek7Jf//AUnZoZ+7qK5r7sz32UkRYxE+w0G83vbMk94IV8QdBQE7WkAHSkEKA7Uwd9SctlF4LSLbMPOvIV9sA0nC8AoRoqfITCSZomfB8A5IDDX6MSR0cI+GOtxZK9l016fIjuhG6jqjCQuw+lccyaZvnyUf3q+AzzfefEkbTK/7M5PD7pmR3tqJP4dsbSCWv4WKjKjE2cfqE/3tnmY2sZ4HUA9m8HqTGv7YDBpSlL/gODTPV93ttA3WNVHUPWOUDnviToX47YxjmyLSQhI97aFDgj4UjM+8Y7Pa32PU12mk6epGDPGEVZGL4Gi5Sg570v5jI5YHfPfM9uSY/Kwa54llimFD7sIJWeG4Pw63tpbPTZb6AUC8pEm/kuo+KPnaNf8Vv7prwhu+X1sAsW/EFmLicXEs9+aZ+KwUqYsR6f2M1JEEj/DKmJQydjq5/U89Z+Yd63z+87peCfeQQm7g6orkcSP8c9dtFKlSiZojyceI0xTAnCWO+eeuAxb/oZaWjEk62odaQV1dn0HxXjiM3Lp7CsTejpv9nKgll9cz65fEDizQPWOJL2R+8265XyaTCe6XJ7vKpvWXWa2xfIw8TfU1HYImFsEncvzWv/tZb5XQ8vPn9f6AYFRSq1o50yyah1Xmy7G/99+GEcBGG1tK46JlLHVItqulGrNiXSwDcqwZV6oxTZE8WOSuDunOEWO6jBksYtTnPge0lAxk1B5TGfGOMWWlhLVi4kSpdP57h+//FvfK+R0neFVmPWdAvS3IfTWCJiSkbgZethFQWfMLFDL4GbgaT0Thnd9Zpb4hwC6UgiesgBQbtj5CsgHe30q31kuxTtE8SUKx3kIzlnbUaDEPBBluZ6NO47lgqAnE2MkISCfsGVKaDFiODHX2PK3vwOuDC7rr4GBmYs9AD+nRut56BUU3KxxwApNQzBKZ8mZ0cQ//A48FqR+2p8XKe4Lsfwi4Ir/IG1Xkul7j2w53/q6b2qTnxJ9eN48np4mUn+NmAJvTy2HBXCelw6EpbNEPqKJcAY5AqM44Mr1ICcBOF/Qwp7C+T4mqbdHibmh/E4SP6aTp2PnblKHiUtPvRfunLDqS2TiS1BxdwEYpfx+hROe71pOcdhgjfVTNAfng/1oyshikljPqAzEKA2RFPoaAbrHa10NobezDVqezfiRzqLdgMG7L1RhUmhqBQKUrSbjygBrR+SNiLUsjW/isOf1OIVJuf6suHt1KudEFP+AgCth1b7nhsfPSE67qV3oOrK8i23QPU48tnrYRcIQCvk3ZJlS4GsXOnFYAKRc/B+6jE6kbKV4MbXMF6J6BQJmNktGMQb+hVjmBkEp5QHAtvw4UKIoBKMThfOcyy0Eoz/4PqDCcmT5d7WToRN24rIWa7f3NfFUXOgxsrwXlDCGxZheYMVlRHW5Q+JO6IiTZYQSrbAtXymDqNjfBcF55Si0HC30BKs2n3sbQC9u7m8xEv/ntjUzbn4T9zYz9Bgn86gvE+EDoHsKgMvK+2ucWvWM/EImzlEEHUVrnOptIGQ5UfyCmlqMUo+Rt+XygJWRgqAj1fcIZCmv74HIGeXj1PpRyd278N+FzsJYItwWTO0QQnEH3C9c6j3VAbd+r9KzPbX87Hmt1+epxyV8aT0rzt4K4BM9plbU1rWh4L2xH8vfSz32L7aFjmSt6y1753abhuAuEnZgjNPSfudHp7iLPIgYSZkU8kDM9YTLbq1LoWVy+VyadSZCPhIC8yiATxG1YmZblf9Y6HkAZH5/dVDjcsqJco6p5b9hf1
2QIu5p3gp2N7YY5dNim7bfFfM+7CLn4XHaAg9iLx3gYaECK1LX1o3rHWCgbSk4s5ZME9t6jBp7NfQ9KfQLsfxNCNBeyEDRcpr4BfdLlayEzjhKbTceD8bYEQBPpk5x1+wRA/MV+VpMUifuApxGVxp76pzX+nFvW9f5q98yLisMNAX5mvBp7XcqENOf7zPvNHHrq+s0tj/ffQwKLQHyhDz5PYdAy5t7MB1GoL8nyTIKeFyw3CNO0B4VuoQLfYMVmdkTb0cSTUZKVOrko5ly/2UbcY4L/YAVN8aGnoTgzEixzb9wljKumFrBdQMqaI8tv7LTt0BlC+htbvxABaArm6lkQxPzQIF/acmEg0mh82jcHhPrv4SAYxFahNBrAW8pIiq7cN0xot3GfjS3aPIU0Qk92pa5RaaWhMA/8LG2LyWFAJq1X49q3BSCQZ/fAEKolDKeJWw7MwbGUzcPoH4JLU0a/p37HHePlbteTCt/F7Et+XFvTzKtlLd7JJP6jvSEJFp2A9jLhKmNc6Z87r4/QKlz5Hijix27Pra0swYbTSu80gH1VzzgNHc3W3TXb+y9wkSN7nGdh+JdVt5XFfMMvQHytT8p/WwFzA2u86MkBFQmiSlspRcze4nAbkqVmOENj39+jlRvNzW837pnr8/5pxIDkQyQhM/7JJKBLyUGhO1yOpQYKOcjBb3EoDPWSwz+pUD+nn4eHPeAdvuM0V44oCrjFI5g1YAjqTSSdZVJJnvufLiT2CIYtTJdEfjubet8RCpX6O7zNfiBagsYbnbvsA7DYMuuQDrPPLFCubMUQBRbp85d1qDmClochhSW2xO6p1YUOYp7wKq/LRMAklf75U5cQtB5qZ0mVPnz8QtJ/BTB+NTKuG/BPTfaZHlvG3TVlu9svUs/YgT68iodpZBzwgFEmby092gPsgy3rEHX8dw4jOhvyeQjR8lilGRx+W+uIy4HPjun2E5vjHWV0v1unfZt1QOjLbG0PU7n0VLxNxQ6ezrZTgNF3pZgwjthheXUkC8IuDJJmLTq6T26zpEhy1eMVLBq+0BZ3tsPgfr8EE0D4ArHx4OEsCW11KFn4Qj9HE30Iw+SRnI+Ilk7BdDblXLn4MRRQ3B3bxv28afF1iQxi9VCM2v5zKTr+zMhA4/NuDwKrbV/6Y8Zn7uw+ZyHEIxknvTZa+1IJvOjB84xUb19UGjm9R3tQoVf0Q5YIcf2OafrER9bc32hIqnTVHLZRfb26R6a5zlJtJQkZmY/lgAEmudmv+Lf9RrmmXBfQy2fQG7n8pkMrpO4OwTcVyE/1YuxdbqvkhsWyJoASv2Exk5acoEuC1RfABC7eq5iZZuAZ/MAn7BktbCvY2sp44HAboLleE1Ujweroh6jFssQ0GSuC0+X8ZRY2oWafP+uxAFgdcd3CLg8mT2gn71kyXCG9azeh1XGiyYQG04zd3tfs4Xc3En13IVaHiOp3Rqzsxn0T0h1YmQte+MOI4omk8RlpGjJ4A05dp8f3YdwvK6TxBDIjPuD5/VYeXoYT8nEYVD18xCMuE4d8MNuOlvobGX5mxKkLmugJHT/eT1et/WAXG2zXiMmCe0CUMORcdLox7oNNPv3OCyfgX1/SP5cQRZUeyzre0xtlUC0E8g+qObyafRiLOR1E0tugZ19k3wPJ0Z6n1nOeLJMwVKcCQPz1LenDiM/cWRsdfb6YbIkYm/iF51kZHMrq7ZNdqsAUi85uWWcxT4g2hOFHXE0DCTLOb1eHDoznFApNHgcquSnSnv74S56qkig0DIvC8Uf8TnqWPey4LGnJnHdCwJmESjRAIDccxDM8U/M7Yj7Spxoks3XUx0Zp94eg2UeQGcTTqTo959S5ChmgX8GklOU6zuTrKBgJHR0lqAYA3ZYwepZERdjjjvK8xjebyT1c+GbFqMsAPsjSatnLySdLsa3MfJlzVYDeNbjPguUyUEVF4W/FThnvC/tcK3jTrUqdRmd+KdZwg74E7i0xr48GSTWOabWcojc7VXE5BgnPPmSRQLYel4iqX+LfS0t5XgNFaMDgojhB3mLgCOj4sNKm7VYns1PENZXuRijKukheU3UkGJADqJSZxbUYgnHCM9rXeA7UmhtGRcI8rjuSFDhGKLSeVPbhJaZI44zKtv9yF+9jbdvMPDfQWo6FUle+7U9TkiEE58JIsD6wW3uRCw/F1g70Qq00GOcbiOsejsE7Zpk5vlIjFM3waqTBdDbc1ub8STVdBla6Ds68U7ksjvOFIG95WDN70c6oivuafDmDFQJv8IuRPUZKX5ITxuS/y/7+piqT1mF8dUAeptw3P2NXJ4aPxjAvUySZSb0OPV2FFzjdTVHglUet53R1Ve6R5x63P80dzJb6FusuK8IXt8v55wfBVa5kmGMJn6FCb09jm7GpaCs6HbmoMA7XdcvsWCVA16fVdzTDOpykLpycJ1X3BlUWti4mSeW6ET/gyjalRiz9kcKnTwA522LsBD5wfX/52NdtZ0t9BLTWtomVPyi80yhFQh4eyJrF2xpKtfX3u+XFXT5+1eyhPuWrrwyAr3WnkeMWuiA1dY7lyfFBSVp1ib40JXAzuyJKyGLtQhAd0SUzjk6cu/Ie+LnIYyuvyksF2f5HmlfEVrshePOwa6AmoSaVD4HtqvD+hEzTfhEbDGRN+FEO5J2pdjy7wLFP/HfQOE0hYfSJzd8SBkvOoSgQ/t7pJb2gi12oQ+DRPWJJNoGQfeCVefy3f1zLIuET65801xgtILHIQyWnyH+6pz4WmSpcgXuz7iPFmcwNbHvq6/p3Zfa22/dndE/R6874y2c1yOA2zxOqUelrz3wPLOD8+p93RYssgDqpw6/UI7zWFPmbiLukG7XRF3Uq/KDtv3Wf7Wf4TmZyJG79lKv0/ZRfH+bjt615uH287zWZTQZ9/Yi7L72lfe25R0DJWtxSBXnZGnprOIooOoesEr5ue57dt06PzkSlXE/kz6v9Ru/8nZXSc0V+Rfka0cKPR4HP/+e1S3kfrtYshgJopqoehwoy2+v3yrc/umcqsGB4zdjrcCHPYxTdubwvD1Bx5qIrrug+kR8z0ZEQQ61cPIXyPcWzmvGbu6Ty56o3pEkZX52LT7ELAA8h32qCg+D3Fsbl/7zO2pe2GqVDXfaeWW+Fy3rXLHiz6s8MGtyyCuHvp4t5PLuDDnDisdsJvW68coutabb7TPcdNTtirrRww9y7re43IHCWTvPzVo58l+zvtXEzQ/3UDe8vOsHKhu7cr3lPuu9QEUUgN7ikSNQ6Am2fEaNUd0dmddzXZuhmjw6gk1xveaFrrZRcdd18VIUGfGgfERhCTd6kNZdj6MTVnjOshXFw4G1av+SPxnNs1deXczjvSDLTwLoH+hkuEvvlj+/2ccOq67U67C7kZPoPBzurMtrvZklXP5+gRPzAFX9SNL5R2tfiHK66b6s/dtsM877OtnGDC1Zlftt9tfHDa0coM37tP6G/GP3T6qLvTfvVnnBkaiNXmQBbDVtvIFx39znO3GwbQcUuqzJx8ff6ggdbD74U3MMxuSvnevNJrDvnbGNb7/TbdrTr
fEQF3gTO2/qi5YfI8sXfkhwtyndIY4ZO9xfaR8vi230+3p8si0zR8ZfXccaqE/Fq/B1qMt0YfkxSbvF5LAz1gqEny0kf4N4+0rb+qe6RG+CNAdO3mXGAQ10pO8TXxWognEDLiswNxxQZU10ZEIOaBT/rt2KPtxK7fBkZUVU7mhjJi62+JFPTwOK3OumeY/Af78L521wO0Dkd0HuJ4rAbznmtx3yx8bXdcytzpFBo/trCPemk0PIhr4GAL0GC8KNUJCaPMEIDbI3on/cgtJklb2uyYAF/gS+RBK2qTSy+vij+qBCqWjx4Q886haNPbakj+nsmjJPPYahLtLrT7SBfPEjkvMRKXSPE5JjkV6fNGT5awpIf940kLUTgs6Gz/v7wvvt59JfLrfs4RM0eIagV4TArXp16v63EYfnomw0dC7jTY/QlTVJfH6TBTW1I2Z16uu9BEoc44Ry6yw1P21KVG+0n3yiL7m6e3LaTT/demJxTyXOfAhLSr+BigFAUkO1Nu0tzcc57/Z+DaWc9VjTB1zuswV9+lDW7ffz3tA7gaKdVr4WY+v8Fo0m1m6t2YIPN2fPsaKd2l4CwXiDoC4JSJ42FJWIRmFF7zW2Ygh96tB1PHL09iphuext7pYdaxqs0zf+3XNc7zDxk6oH7m+l6j4duQbKDaKFyLzLp4VW2+bFGb//jcA/u9z8FRry7Zaqd2nJgts4XEe/Lh/OIsr/vr57nS5uZVTOI9q87m3DayOCElKXsaI9d037dtFDv0+zVS4IQMagYhYkMUeDetz4iR4ML3Wl2fMXe6xb732FPuzTCn8r5fidvvC/hrbs0a1/cR9wJ26Kd/u6VX8T0MUf+VBsaMrOHVu2b3u+03aJpxtra7m82wv8dTqyhw/+BXp5d4c/8tVrMQT/VPdMgV+suh0QR6KaMoLOqN8F8YUOiK9Dv6908n6q88H5frdv9Vkd7svn3U5H7UKgz0i6nX6vo69+n5UduZ/t5Jt4ewz8I4Xze/vh8dTuTn4jWWxXBupP5Hqwre5Kbql/7V58rQVZGpN7p4JoJjy0v92J8Sc+3eh8KthnkblpV/c370Lc4W7hT1c1et8H/+3dwR9UJW67sbrdVTypjDXbeNRsg1yeH4J0MGnch2S7GqJtlpa5CRVf6iSNEx5sMlZ/aNEkjQXJvDJN/SBh5M/cPPtu/7+QSiGbZY/0+/1S3WffTBTTtz6wJd0zf5+6+ZMUSfejvDfpkVMA3Fc0wBP+H+uT/6g+N/3lv//f/wQAAP//vp+ZfQ==") + unpacked := packer.MustUnpack("eJzce1mTo0h39v33M+b2s/2ylGoaR7wXgmo2qagRapGQd2SmCiQlSFMCSeDwf3dksggQqqVnPB77oiK6U5DLybM85zmH//hlk2brtzSk/zge1vgf4SH51+P67bR++7ciob/8+y8o0TP4Yx8tPNWZew7FKaQ4OmwRWDxahn5GS7GEvi1B35oFvi2EAMaBPPpbist9BM77yNKszF1aR0uzswBMYih5GQQTYZ54eQDsIwQLhZi2CJfWUdtMI2sj6tbmHFkJ2fqySnHiUJQuFNvM1NV38Yfr2cD17FdXUMxFub88P6mKFR2IlngP2FAKYng7XxIpMe1DID8/WvpxZmnTTeCr2dyvz7SxjhoVZjj1jtB/fmTrzpfqFsnqxJfdky9dDlhe8HFLm0aWQQUIhEfLgEcIPKEdN93Ty0Y9oFQVifk842PaNELS5DWQlBwml0Mln8kJyVP2e2YZYoyf9u2z2NCF8GkfweRCob+4jnf21ozNl2oBgXgiifcaSt7kJdq3v1V/6hv0d+w+toHklVhUYmxQ/uxPzWPatJIpzeG5+4wQ4cTLkAypL2V0/eN6nuaPz7tR2X3nZLrn78CEPviyI+DEi9GPfbSWhVom8IBMl2KqSAG4iL1zmw5FhrclhlKMybpeR1j7Kr2+A2NkehSXvX1lXE8X7V6OxPCK69nVEoILDWT3hNMbud+sW82niMRUxep8V9l07jKzDJqHibclurKHQN9B3y5fNuqvr4uDHBpe/rJRjxBMUmJEe9vM6nUcZbac/n/raRoFYLKzjDjGQkbXy2i3luo1TeFoaYQiQy+JQbdY8mKcOHu7OEe2bFNo0NIuzmwPaSjpSSh9T+faNEWGkmLZjbEUpbPF/p+//MvQK+Rkk6F1mA2dgu/tQt/dQKALWuJk8GkfBb0xvYAdg5uD582cG971mXniHQPfEULwnAWAMMPO10A8Wptz9c5qxd/BkicQf5qH4JJ1HQVM9COWVpv5tOdYSui7ItYmAgTiGRm6AJcTihJ9gwxv9xtgyuDQ4RoI6DnfA/ByonWe992CgJs1jkgiaQgm6Ty5UJJ4x9+AS4PUS4fzQsl5xYZXBEzxn4TdWtA99ztdLXae6umK+UMgTy/b7+dnUxiuERPgHohh08Bf5JUDoek8EU/Q5M4gh2ASB0y5nsQkAJcSLq2ZvzjEOHUPMNG3hN1J4sXEfD717ia1Kb/01H1lzgnJnoBNT/AlZx+AScrulzvhxb7jFMcNVts8RwtwOVrfdREaVODrabWBaJUh4kLdQEAOaKPKoe/uLY1UZ9O+pfNoP2LwziuRqBDqSgEBoWtzWhtg44jcCTZWlfGZNn3ZTFM/qdafFw9vdu2csOQdIXAEJFuPzPDYGfF5P7MKVYWGW1oaOaDEpeunfcQNoRB/hYYuBJ5SEtOmARBy/n/focQUsrXkxsTQX7HsFhDo2TyZxAh4JTb0LfSFlAUAy/DiQIqiEEzOxF/kTG4hmPzO9uFLNIeG99A4GWLSM5M1X7u7L9OVUaHG0HBfYUIp4mNqgSSHYtlhDok5oRNKVhFMlMIyPKkKonx/JfQXtaNQcrhUEyRbbO5d4Ltxe3/LCf8/s625dvMbv7e5psYoWURDmXAf4DvnADi0ur/WqdXPiK/YtE886EhK61RvAyHNseQVRFdimLoU35fLE5ImEvRtoblHIAp5cw9YzAgbJ8a3Wu5uyX7nOuvHAma2oCvH0Od3wPxC2eypCbjNe7WeHYjhZS8btTlPMy6gsvMsP3sngJtqTIyoq2tjwXtrfa9+r/TYKy0DnvBGVTv2zuw2DcFDxO1Am6aV/S5OdvEQuT6kOKVCyAIx0xMmu40qhIbO5FO265hcPgIEiyjwnyNixNQyav+xVPMAiOz+mqDG5JRj6RITw7tjf32Qwu9p0Ql2N7YY5bNil3bf5fM+7SP76fusAx74XnrAw4AFkoS+rWvXO0BA2RFwoR2ZJpbxPWrtVVMPuFBLbHjbEMADl4Gk5CTxCuaXallxnbGlxm5cFoyRzQGeSOziod0jAvob9JQYp3bcBzitrrT21Duv8e3RMq7z179lTFYIKBL0FO7Tuu/UIGY432feaePWV9dpbX+x/xgUGhzkcXmyew6Bkrf3oNsU+94BJ6soYHHBcE4ogQdYqAIq1C2SRGqZ7h4nigilqNLJ73rK/JelxTkq1COSnBhpahKCC8XFLv/CWaq4oisF0w1fggdkeLWd3gOVHaC3vfEDNYCu
baaWDUn0IwFe2ZEJA5Nc5+G0O8bXfw0BwyKkCH23A7yFCMu0ZLqjRfut9V3fQfM5IiY5WYa+g7qShMA7srGuL8UFB5qNX48a3BSCUZ/fAkJfqmQ8T+huro2Mp04e+GoZGoow/jvzOc4BSQ+DmFb9zmNb8u3RMjOlkrdzwmZzR2qCEyW7AexVwtTFOTM299AfwNQ+MbzRx459H1vZWYuNZjVe6YH6Kx6w27ubL/vrt/ZeY6JW95jO+/xdWt1XHfM0tQXyjT+p/GwNzDWm85MkBETEic5tZRAzB4nAfkakmKIti39eDmV3P9PcX/tnb875hxIDngzghM37zJOBLyUGmO5zMpYYSJcTAYPEoDc2SAz+ViD/QD4PjgdAu3vG6MAdUJ1xckewbsGRUBnJps4kkwNzPsxJ7KAfdTJdHvgeLeNygjJT6P7zDfjx5Q4w3O7fYR3GwZZVg3SWeSKJMGfJgSgyzr27bEDNFbTYFEo0t0xyIEYU2ZJzRLK3qxIAnNf7ZU5cgL792jhNX2bPx6848VLox+dOxn0L7pnRJqtHSyPrrnznm336ESMwlFflKLmcEwYgquSlu0drlGW4ZQ36jufGYUR/SSYf2VIWwySLq38zHXEY8NnbxW52Y6zrlBz2m3Roqy6Y7LChHFC6iFaStyW+fSDmbhZI4q4CE+4ZSTQnmlhC4Ig4ocJ6oPfwOkcGDU/SUs6qHQJp9Wg9BfLLUzQLgMMdHwsS3JbkSodeuCP0cmiqJxYkteRygqJyDnx3X8mdgRNbDsHDo6VZpx8G3eBEL9ZLRW/kMxeu78+5DFw6Z/IolM7+hd/nbO7CYnMeQzARWdJnbZQTNhcnF1xiLLuHoFD06ztKSbhfUY5IwqfuOWebCRvbMH0hPKlTZFzuI2v3/OjrlwVOlBQnemZ9rwCIr1/a/fJ/N2voF8x8DTE87DM7Fy94dJ3E2UPgvHH5yW6MjPNjndzQQFQ4UBomNFbSkYvv0ED2OACx6udqVrYNeBYL8AlN1kvrOrYRMhYIrDZYTjdYdlmwKpoxYtAMAkVkuvBcTmfYUEqis/07AgOA9R0/QOCwZPYIfwySJc0e17NmH0YVL9pArNnt3N19zZdieyf1cyUxXIpTqzNmZXPfO0PZjqGxGozbFEuKiBOH4qIjgzty7D8/eQz96aZJEkMgUuYPXjZT6flpOsOmTX3Zy0MwYTp1RE/72Xyp0rXhbSuQumqAEtf9l81009UDfLXNZo0YJ6QPQDVbREmrH5su0Bze47h8Rvb9IflzBVm+PGBZ32Nq6wSim0AOQTWTT6sXUy6vm1hyC+ysm+R7PDFSh8xyxpJlAlb8TAjo56E99Rh50xaR0dvrh8kSj72JV/SSke2trLo22a8CCIPk5JZx5vvw4QFL9ISicSBZzekO4tCFooQIocbiUC0/WThYTw/Rc00ChYZeLiVvwuZoYt3rksWehsR1Sgj0IpCiEQB5YCCY4Z+Y2RHzlShRBIutJ9siSt0DAqs88O1taArRbz+EyJb0Av0IBLuo1rfNrCBgwnV0nsAYAXpc+/WzPC7GDHdU59HcX3Hq5dw3LSdZAA4nnNbPljidLae3MfJ1Q9cjeNZlPgtUyUEdF7m/5ThneqjscKOiXrUqdSgxvfM8oUf0CVzaYF+WDGLjEhNjNUbuDipiYowSlnyJPAHsPC/g1LvFvoaSMrwGi8kR+pCiJ3EHgS3C4sNKm7FcXfRPENZXuWiTOunBeUPU4GJEDrxSpxfEoAnDCC8bleM7XChdGRfQZ3HdFnyJYYha53VlGxp6DhnOqG33I391H2/fYOC/gtS0a5K88WsHlOAIJR7lRIDxjdncGRtezrF2ohRwqcYo3UVIdvfQtxqSmeUjMUqdBMl2FvjugdnanCWpukPhUt0T0z3jcn+aSxx7i8GG3Y9wglfc0+LNOagTfomWWPYoLr4Jz1uc/zf7+pjIz1mN8eXAd7fhtP8bLp9bPxj4BxEnq4zrceruCbjG63qOBMksbtuTq690Tih1mf9p72S+VHdIct6gf32/mnNx4ljlSoZRkng1JnQPKLoZF4KqotubgwD3fF2/woJ1Dnh9VnLOc18Vg9QRg+u8/M58qYON23ligZjq71hSrsSYcTgR384DcNl1CAueH1z/fzk1Vdv5Uq0wraFsQ8kres8USgGBe8CiUiJDkZm+Dn4v177D3r+SJcy39OV1JEDcQN/+PQDO2/V98ULMDsmSeAJMLidy3dfpuQzEmuzuEsS70L/eP8OgIXiYdYm03j5Nd4uv+iNC83o3BEwO6+tvcgg4xr4b9weEYDevr/VNOa89JUbG5ZUYyisyaEme9jeEdVus6OaNBvNnnCA9hpX/y5GknLvEJvTjLfRV9huy0tqf+VUxtfX5Gve/Z5woW+g7JZLt0qLCcI8CEqsiSB+f1N0M/QLTz+6fYXOKU/c1SLykJssq4raKp7MeuW+MkXgN/mjPOvuZQs5wv6HvUhYHhuO9Nc8cZ2aBPx3g8SFxeYP/OsWT1eeKJYYYY1OlXc6mGr+cYJ0TdG32qg8d/1THk+56ljbwVdv9wF6adeqioexuMduf4ZzvzMPs59EyvR2e9vfC1p5L7imQMnaOaOBPmnm4r6ls2HnFzD+z/JzpVM+uO+e/+phHy3Qm7J2O7/oQaxPfoS1OmH7+vUGhs+t/7nS3NAWCdwrDXyio3C0+/8X5z59I7ndwXPcM1m3xJe3spYqVFZd6P9d5gz4tGeaAi30f2xleDA2P5VJVnpqSPQQPj/0857pGW0z4gzlPS6bzPZK3AMC3YImPlkY4riSGXoYaPmjRP28J9Ve6XmfjHXhulQdGqyaHrHn1Oj/M2tzyyq1v5kux6rbSxAxJLmXxoN+lV3WvtV1wn+Gso3631A0O/EAX73G8IwW1bv6bdXLnP2d9o60vfLiHphHm3QJkbb9XDrjaZ7MXZj8sztzhlyNQqAkyPEq0SdM1mTdzXZukWl2L/DaOtPHpRHyX5QINp93YFS8+olH58IITavUgbbohJ2cksVxmx4uKI2s1dp4/a+2zV769jv/Q8JLA947EHO/eu+XVb/axR7IjDDrvbuTEOxLHO+7yRm+YfTN9QYl+9GX1hNPFR2uXWDrfdGU22GK+neZDnexix46sqv22+xvix46/6/JBnb+xomX/T2iKwDfv1vnCCcutXnSwzf2Glbv7fKcA/1H8/TiWTrvxarQp4Q/NsZzwojKW1TiQVj91rnfi88/s79pcFP1UF+pAt6ZjHGHz7NA/fDVWcvt4Xe6i3zbTs2XoOdT+7PrWSN0qXodvY92nS8OLcdovMoe9sU4g/GyB+ScIua+0s3+qe/QmSNs0YA5Hiikb/3lCrC50+nELUutug/GAKiq8U9OXbQol76Hboj7eYm3Tl426xjJztDHlF1t8y2fnEUUedNm8B2zf787pGGZzN1dHe9s63iRqqUc/Uxy+55jvO+SPja/vmDsdJaNG9zcApck6e9vgEQv8ATwBJ3Rba2T9UUj9oYVU0+XjH340rRsHZAgf09wNlZ66FPnqkZd3P24P+eLHJSz
lJgeU4BzxtPisQMPbEICH86aBqJyhb2/ZvL8t3V9/rLzVakefPkGPZ9B3ixA4dcrR9MVNGDzndMLYubS7HqEva5x47CYLoisnRJVa49zXQIpjlBBmnZXmpy11cact5RP9yvXd13TJ51pS3qOKABRaCrZte2k/2nm3J2xAz/X6cztUzwe0jjPs8/1b0XwhWPzvovsM74GYdsx0gstJV3iP7LVcMICdHaqvv8fB+OADq9GSxL3INVKG4K1F+kM+K5TGNkt7+v63A//TZeiP6Oi6FMT95P1Wq84d3PZXF8zG/U30j9XThUf53zYPb7PlrYyqeXj716OluV1EcEv5aj2qqI8ehv2bV8osC0BGfUkvcKJPRvW49RMDGF7pSrvnL/Zed95bfIEGHNIKX6cOf7pvu/M9xx+YYyRF+sQZTK+EntJJMf5UCrEXN98vJ/RiYj4WG9py9PkrdGQ/1jZyebdHuCeDmAbApWzO6vyjLYIDfPA36PHdH3/P12/FGPyTnQsBXrHud0acsKyL0Lcnw+6IL3RGfB36faXD91MdEfbPdwHXn9uhoXze7YBUSux7FKe72c91+jXv06pT97MdfqZ7QMA7EX/xaD19P3e7lu8ki92uiObTuQFsa7qVO+rfuBdP6UCW1uQYXOXdIsigXIYh4Owecx0JC+33OzT+wCcdvU8IhywyM+36/gaVhfEu4k9/ajD4bvgv7xr+4Pvf2y6tftcVSypjxdK+K5aGy5enIB1NGg8h3q3HaJuVoW9DyRN6SaPJgk1Gmw8w2qSxwJlbpakfJIzsmZtn3/0ugEulEPWqd/r9Pqr+s3cTxfTeh7e4f+afp27+IEXSrz/fpUfOAXDe4AhP+H+sf/6jL+Fnv/zn//uvAAAA//8jBKIe") SupportedMap = make(map[string]Spec) for f, v := range unpacked { diff --git a/internal/spec/filebeat.yml b/internal/spec/filebeat.yml index e4dbd2a9892..f387f3cdc32 100644 --- a/internal/spec/filebeat.yml +++ b/internal/spec/filebeat.yml @@ -46,6 +46,7 @@ rules: - log/redis_slowlog - log/syslog - logfile + - lumberjack - mqtt - netflow - o365audit From 1632dd1858dd9e18b42c8f8cfccb228251aba79d Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Tue, 23 Aug 2022 01:34:43 -0400 Subject: [PATCH 091/180] [Automation] Update elastic stack version to 8.5.0-dd6f2bb0 for testing (#978) Co-authored-by: apmmachine --- testing/environments/snapshot.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/testing/environments/snapshot.yml b/testing/environments/snapshot.yml index 59f8b53298b..c2c426cde5d 100644 --- a/testing/environments/snapshot.yml +++ b/testing/environments/snapshot.yml @@ -3,7 +3,7 @@ version: '2.3' services: elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:8.5.0-0616acda-SNAPSHOT + image: docker.elastic.co/elasticsearch/elasticsearch:8.5.0-dd6f2bb0-SNAPSHOT # When extend is used it merges healthcheck.tests, see: # https://github.com/docker/compose/issues/8962 # healthcheck: @@ -42,7 +42,7 @@ services: - ./docker/logstash/pki:/etc/pki:ro kibana: - image: docker.elastic.co/kibana/kibana:8.5.0-0616acda-SNAPSHOT + image: docker.elastic.co/kibana/kibana:8.5.0-dd6f2bb0-SNAPSHOT environment: - "ELASTICSEARCH_USERNAME=kibana_system_user" - "ELASTICSEARCH_PASSWORD=testing" From 4fdc4451a1b11fdb66dee5402f8ceb610fff7246 Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Wed, 24 Aug 2022 01:34:31 -0400 Subject: [PATCH 092/180] [Automation] Update elastic stack version to 8.5.0-feb644de for testing (#988) Co-authored-by: apmmachine --- testing/environments/snapshot.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/testing/environments/snapshot.yml b/testing/environments/snapshot.yml index c2c426cde5d..0517a0bcc6a 100644 --- a/testing/environments/snapshot.yml +++ b/testing/environments/snapshot.yml @@ -3,7 +3,7 @@ version: '2.3' services: elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:8.5.0-dd6f2bb0-SNAPSHOT + image: docker.elastic.co/elasticsearch/elasticsearch:8.5.0-feb644de-SNAPSHOT # When extend is used it merges healthcheck.tests, see: # https://github.com/docker/compose/issues/8962 # healthcheck: @@ -42,7 +42,7 @@ services: - ./docker/logstash/pki:/etc/pki:ro kibana: - image: 
docker.elastic.co/kibana/kibana:8.5.0-dd6f2bb0-SNAPSHOT + image: docker.elastic.co/kibana/kibana:8.5.0-feb644de-SNAPSHOT environment: - "ELASTICSEARCH_USERNAME=kibana_system_user" - "ELASTICSEARCH_PASSWORD=testing" From 010e15f5ee93950554a4cd042a3374a7e12114df Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Thu, 25 Aug 2022 01:37:42 -0400 Subject: [PATCH 093/180] [Automation] Update elastic stack version to 8.5.0-7783a03c for testing (#1004) Co-authored-by: apmmachine --- testing/environments/snapshot.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/testing/environments/snapshot.yml b/testing/environments/snapshot.yml index 0517a0bcc6a..6c437497423 100644 --- a/testing/environments/snapshot.yml +++ b/testing/environments/snapshot.yml @@ -3,7 +3,7 @@ version: '2.3' services: elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:8.5.0-feb644de-SNAPSHOT + image: docker.elastic.co/elasticsearch/elasticsearch:8.5.0-7783a03c-SNAPSHOT # When extend is used it merges healthcheck.tests, see: # https://github.com/docker/compose/issues/8962 # healthcheck: @@ -42,7 +42,7 @@ services: - ./docker/logstash/pki:/etc/pki:ro kibana: - image: docker.elastic.co/kibana/kibana:8.5.0-feb644de-SNAPSHOT + image: docker.elastic.co/kibana/kibana:8.5.0-7783a03c-SNAPSHOT environment: - "ELASTICSEARCH_USERNAME=kibana_system_user" - "ELASTICSEARCH_PASSWORD=testing" From 1f3f40977d9e5d6cce6a1eeb6ed4b674a17fc165 Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Fri, 26 Aug 2022 01:35:25 -0400 Subject: [PATCH 094/180] [Automation] Update elastic stack version to 8.5.0-17b8a62d for testing (#1014) Co-authored-by: apmmachine --- testing/environments/snapshot.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/testing/environments/snapshot.yml b/testing/environments/snapshot.yml index 6c437497423..a62e875f82b 100644 --- a/testing/environments/snapshot.yml +++ b/testing/environments/snapshot.yml @@ -3,7 +3,7 @@ version: '2.3' services: elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:8.5.0-7783a03c-SNAPSHOT + image: docker.elastic.co/elasticsearch/elasticsearch:8.5.0-17b8a62d-SNAPSHOT # When extend is used it merges healthcheck.tests, see: # https://github.com/docker/compose/issues/8962 # healthcheck: @@ -42,7 +42,7 @@ services: - ./docker/logstash/pki:/etc/pki:ro kibana: - image: docker.elastic.co/kibana/kibana:8.5.0-7783a03c-SNAPSHOT + image: docker.elastic.co/kibana/kibana:8.5.0-17b8a62d-SNAPSHOT environment: - "ELASTICSEARCH_USERNAME=kibana_system_user" - "ELASTICSEARCH_PASSWORD=testing" From 91d48547d8edcea392b37ce34ca842ed3077107d Mon Sep 17 00:00:00 2001 From: Julien Mailleret <8582351+jmlrt@users.noreply.github.com> Date: Fri, 26 Aug 2022 12:30:40 +0200 Subject: [PATCH 095/180] update ironbank image product name (#1009) This is required to automate the creation of the ironbank merge requests as the ubireleaser is using this field to compute the elastic-agent artifact url. 
For example it is now trying to retrieve https://artifacts.elastic.co/downloads/beats/elastic-agent-8.4.0-linux-x86_64.tar.gz instead of https://artifacts.elastic.co/downloads/beats/elastic-agent/elastic-agent-8.4.0-linux-x86_64.tar.gz --- .../packaging/templates/ironbank/hardening_manifest.yaml.tmpl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-tools/packaging/templates/ironbank/hardening_manifest.yaml.tmpl b/dev-tools/packaging/templates/ironbank/hardening_manifest.yaml.tmpl index 3c753caa0fb..e4b4df82e23 100644 --- a/dev-tools/packaging/templates/ironbank/hardening_manifest.yaml.tmpl +++ b/dev-tools/packaging/templates/ironbank/hardening_manifest.yaml.tmpl @@ -35,7 +35,7 @@ labels: ## This value can be "opensource" or "commercial" mil.dso.ironbank.image.type: "commercial" ## Product the image belongs to for grouping multiple images - mil.dso.ironbank.product.name: "beats" + mil.dso.ironbank.product.name: "elastic-agent" # List of resources to make available to the offline build context resources: From 9c0cb456bbf73dfc87159a97759611de0151fba1 Mon Sep 17 00:00:00 2001 From: Victor Martinez Date: Fri, 26 Aug 2022 12:01:52 +0100 Subject: [PATCH 096/180] ci: add extended support for windows (#683) --- .ci/Jenkinsfile | 72 +++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 72 insertions(+) diff --git a/.ci/Jenkinsfile b/.ci/Jenkinsfile index 764e13952b1..d3c082c5ae9 100644 --- a/.ci/Jenkinsfile +++ b/.ci/Jenkinsfile @@ -39,6 +39,10 @@ pipeline { // disabled by default, but required for merge: // opt-in with 'ci:end-to-end' tag on PR booleanParam(name: 'end_to_end_tests_ci', defaultValue: false, description: 'Enable End-to-End tests') + + // disabled by default, but required for merge: + // opt-in with 'ci:extended-windows' tag on PR + booleanParam(name: 'extended_windows_ci', defaultValue: false, description: 'Enable Extended Windows tests') } stages { stage('Checkout') { @@ -51,6 +55,7 @@ pipeline { setEnvVar('ONLY_DOCS', isGitRegionMatch(patterns: [ '.*\\.(asciidoc|md)' ], shouldMatchAll: true).toString()) setEnvVar('PACKAGING_CHANGES', isGitRegionMatch(patterns: [ '(^dev-tools/packaging/.*|.ci/Jenkinsfile)' ], shouldMatchAll: false).toString()) setEnvVar('K8S_CHANGES', isGitRegionMatch(patterns: [ '(^deploy/kubernetes/.*|^version/docs/version.asciidoc|.ci/Jenkinsfile)' ], shouldMatchAll: false).toString()) + setEnvVar('EXT_WINDOWS_CHANGES', isGitRegionMatch(patterns: [ '.ci/Jenkinsfile' ], shouldMatchAll: false).toString()) } } } @@ -299,6 +304,66 @@ pipeline { wait: true) } } + stage('extended windows') { + when { + // Always when running builds on branches/tags + // Enable if extended windows support related changes. 
+ beforeAgent true + anyOf { + not { changeRequest() } + expression { return isExtendedWindowsEnabled() && env.ONLY_DOCS == "false"} + } + } + failFast false + matrix { + agent {label "${PLATFORM} && windows-immutable"} + options { skipDefaultCheckout() } + axes { + axis { + name 'PLATFORM' + values 'windows-8', 'windows-10', 'windows-11' + } + } + stages { + stage('build'){ + options { skipDefaultCheckout() } + steps { + withGithubNotify(context: "Build-${PLATFORM}") { + deleteDir() + unstashV2(name: 'source', bucket: "${JOB_GCS_BUCKET}", credentialsId: "${JOB_GCS_CREDENTIALS}") + withMageEnv(){ + dir("${BASE_DIR}"){ + cmd(label: 'Go build', script: 'mage build') + } + } + } + } + } + stage('Test') { + options { skipDefaultCheckout() } + steps { + withGithubNotify(context: "Test-${PLATFORM}") { + withMageEnv(){ + dir("${BASE_DIR}"){ + withEnv(["TEST_COVERAGE=${isCodeCoverageEnabled()}"]) { + cmd(label: 'Go unitTest', script: 'mage unitTest') + } + } + } + } + } + post { + always { + junit(allowEmptyResults: true, keepLongStdio: true, testResults: "${BASE_DIR}/build/TEST-*.xml") + whenTrue(isCodeCoverageEnabled()) { + coverageReport(baseDir: "**/build", reportFiles: 'TEST-go-unit.html', coverageFiles: 'TEST-go-unit-cov.xml') + } + } + } + } + } + } } post { cleanup { @@ -412,3 +477,10 @@ def isE2eEnabled() { def isPackageEnabled() { return env.PACKAGING_CHANGES == "true" || env.GITHUB_COMMENT?.contains('package') || matchesPrLabel(label: 'ci:package') } + +/** +* Wrapper to know if the build should enable the windows extended support +*/ +def isExtendedWindowsEnabled() { + return env.EXT_WINDOWS_CHANGES == "true" || params.extended_windows_ci || env.GITHUB_COMMENT?.contains('extended windows') || matchesPrLabel(label: 'ci:extended-windows') +} From 9298e017906ec6f340ac840378144f9d6be6c335 Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Mon, 29 Aug 2022 09:43:25 -0400 Subject: [PATCH 097/180] [Automation] Update elastic stack version to 8.5.0-9aed3b11 for testing (#1030) Co-authored-by: apmmachine --- testing/environments/snapshot.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/testing/environments/snapshot.yml b/testing/environments/snapshot.yml index a62e875f82b..f4e041b1618 100644 --- a/testing/environments/snapshot.yml +++ b/testing/environments/snapshot.yml @@ -3,7 +3,7 @@ version: '2.3' services: elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:8.5.0-17b8a62d-SNAPSHOT + image: docker.elastic.co/elasticsearch/elasticsearch:8.5.0-9aed3b11-SNAPSHOT # When extend is used it merges healthcheck.tests, see: # https://github.com/docker/compose/issues/8962 # healthcheck: @@ -42,7 +42,7 @@ services: - ./docker/logstash/pki:/etc/pki:ro kibana: - image: docker.elastic.co/kibana/kibana:8.5.0-17b8a62d-SNAPSHOT + image: docker.elastic.co/kibana/kibana:8.5.0-9aed3b11-SNAPSHOT environment: - "ELASTICSEARCH_USERNAME=kibana_system_user" - "ELASTICSEARCH_PASSWORD=testing" From 3e89bfcca25d476b54acd951fb8efaa2ba7b774a Mon Sep 17 00:00:00 2001 From: Andrew Gizas Date: Tue, 30 Aug 2022 11:25:47 +0300 Subject: [PATCH 098/180] Cloudnative ci automation (#1035) * Updating Jenkinsfile and Makefile to open PR * Adding needed token-id --- .ci/Jenkinsfile | 2 +- deploy/kubernetes/Makefile | 2 ++ deploy/kubernetes/creator_k8s_manifest.sh | 10 +++++----- .../elastic-agent-standalone-kubernetes.yaml | 2 +- .../elastic-agent-standalone-daemonset-configmap.yaml | 2 +- 5 files changed, 10 insertions(+), 8
deletions(-) diff --git a/.ci/Jenkinsfile b/.ci/Jenkinsfile index d3c082c5ae9..ab8f17167cb 100644 --- a/.ci/Jenkinsfile +++ b/.ci/Jenkinsfile @@ -483,4 +483,4 @@ def isPackageEnabled() { */ def isExtendedWindowsEnabled() { return env.EXT_WINDOWS_CHANGES == "true" || params.extended_windows_ci || env.GITHUB_COMMENT?.contains('extended windows') || matchesPrLabel(label: 'ci:extended-windows') -} +} \ No newline at end of file diff --git a/deploy/kubernetes/Makefile b/deploy/kubernetes/Makefile index 42bb611fa40..6d31e04e38e 100644 --- a/deploy/kubernetes/Makefile +++ b/deploy/kubernetes/Makefile @@ -58,6 +58,8 @@ ci-create-kubernetes-templates-pull-request: exit 1; \ fi echo "INFO: Create branch to update k8s templates" + git config user.name obscloudnativemonitoring + git config user.email obs-cloudnative-monitoring@elastic.co git checkout -b $(ELASTIC_AGENT_BRANCH) echo "INFO: add files if any" git add $(ELASTIC_AGENT_REPO_PATH)$(FILE_REPO) diff --git a/deploy/kubernetes/creator_k8s_manifest.sh b/deploy/kubernetes/creator_k8s_manifest.sh index e162613f498..245f43dcb3d 100755 --- a/deploy/kubernetes/creator_k8s_manifest.sh +++ b/deploy/kubernetes/creator_k8s_manifest.sh @@ -29,11 +29,11 @@ fi #Start creation of output file cat << EOF > $OUTPUT_FILE /* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ +* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +* or more contributor license agreements. Licensed under the Elastic License +* 2.0; you may not use this file except in compliance with the Elastic License +* 2.0. +*/ export const elasticAgentStandaloneManifest = \`--- EOF diff --git a/deploy/kubernetes/elastic-agent-standalone-kubernetes.yaml b/deploy/kubernetes/elastic-agent-standalone-kubernetes.yaml index dc283ce40b8..c5e4e7fc7e9 100644 --- a/deploy/kubernetes/elastic-agent-standalone-kubernetes.yaml +++ b/deploy/kubernetes/elastic-agent-standalone-kubernetes.yaml @@ -1,4 +1,4 @@ -# For more information refer https://www.elastic.co/guide/en/fleet/current/running-on-kubernetes-standalone.html +# For more information refer to https://www.elastic.co/guide/en/fleet/current/running-on-kubernetes-standalone.html apiVersion: v1 kind: ConfigMap metadata: diff --git a/deploy/kubernetes/elastic-agent-standalone/elastic-agent-standalone-daemonset-configmap.yaml b/deploy/kubernetes/elastic-agent-standalone/elastic-agent-standalone-daemonset-configmap.yaml index 1e42f94af15..7048bf22adb 100644 --- a/deploy/kubernetes/elastic-agent-standalone/elastic-agent-standalone-daemonset-configmap.yaml +++ b/deploy/kubernetes/elastic-agent-standalone/elastic-agent-standalone-daemonset-configmap.yaml @@ -1,4 +1,4 @@ -# For more information refer https://www.elastic.co/guide/en/fleet/current/running-on-kubernetes-standalone.html +# For more information refer to https://www.elastic.co/guide/en/fleet/current/running-on-kubernetes-standalone.html apiVersion: v1 kind: ConfigMap metadata: From 64f0c3cdd838c55aabe6cc49bd7dc4d693155737 Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Wed, 31 Aug 2022 01:50:49 -0400 Subject: [PATCH 099/180] [Automation] Update elastic stack version to 8.5.0-fedc3e60 for testing (#1054) Co-authored-by: apmmachine --- testing/environments/snapshot.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff 
--git a/testing/environments/snapshot.yml b/testing/environments/snapshot.yml index f4e041b1618..9e60e96183b 100644 --- a/testing/environments/snapshot.yml +++ b/testing/environments/snapshot.yml @@ -3,7 +3,7 @@ version: '2.3' services: elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:8.5.0-9aed3b11-SNAPSHOT + image: docker.elastic.co/elasticsearch/elasticsearch:8.5.0-fedc3e60-SNAPSHOT # When extend is used it merges healthcheck.tests, see: # https://github.com/docker/compose/issues/8962 # healthcheck: @@ -42,7 +42,7 @@ services: - ./docker/logstash/pki:/etc/pki:ro kibana: - image: docker.elastic.co/kibana/kibana:8.5.0-9aed3b11-SNAPSHOT + image: docker.elastic.co/kibana/kibana:8.5.0-fedc3e60-SNAPSHOT environment: - "ELASTICSEARCH_USERNAME=kibana_system_user" - "ELASTICSEARCH_PASSWORD=testing" From f3852cf9c1ce40405688e52e6a1def6ee834d9f8 Mon Sep 17 00:00:00 2001 From: Andrew Gizas Date: Wed, 31 Aug 2022 16:24:16 +0300 Subject: [PATCH 100/180] Testing PR creation for 706 (#1049) --- deploy/kubernetes/elastic-agent-standalone-kubernetes.yaml | 4 ++-- .../elastic-agent-standalone-daemonset-configmap.yaml | 2 +- .../elastic-agent-standalone-daemonset.yaml | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/deploy/kubernetes/elastic-agent-standalone-kubernetes.yaml b/deploy/kubernetes/elastic-agent-standalone-kubernetes.yaml index c5e4e7fc7e9..9a8ff40e179 100644 --- a/deploy/kubernetes/elastic-agent-standalone-kubernetes.yaml +++ b/deploy/kubernetes/elastic-agent-standalone-kubernetes.yaml @@ -1,4 +1,4 @@ -# For more information refer to https://www.elastic.co/guide/en/fleet/current/running-on-kubernetes-standalone.html +# For more information refer https://www.elastic.co/guide/en/fleet/current/running-on-kubernetes-standalone.html apiVersion: v1 kind: ConfigMap metadata: @@ -624,7 +624,7 @@ data: # period: 10s # condition: ${kubernetes.labels.app} == 'redis' --- -# For more information refer to https://www.elastic.co/guide/en/fleet/current/running-on-kubernetes-standalone.html +# For more information refer https://www.elastic.co/guide/en/fleet/current/running-on-kubernetes-standalone.html apiVersion: apps/v1 kind: DaemonSet metadata: diff --git a/deploy/kubernetes/elastic-agent-standalone/elastic-agent-standalone-daemonset-configmap.yaml b/deploy/kubernetes/elastic-agent-standalone/elastic-agent-standalone-daemonset-configmap.yaml index 7048bf22adb..1e42f94af15 100644 --- a/deploy/kubernetes/elastic-agent-standalone/elastic-agent-standalone-daemonset-configmap.yaml +++ b/deploy/kubernetes/elastic-agent-standalone/elastic-agent-standalone-daemonset-configmap.yaml @@ -1,4 +1,4 @@ -# For more information refer to https://www.elastic.co/guide/en/fleet/current/running-on-kubernetes-standalone.html +# For more information refer https://www.elastic.co/guide/en/fleet/current/running-on-kubernetes-standalone.html apiVersion: v1 kind: ConfigMap metadata: diff --git a/deploy/kubernetes/elastic-agent-standalone/elastic-agent-standalone-daemonset.yaml b/deploy/kubernetes/elastic-agent-standalone/elastic-agent-standalone-daemonset.yaml index 59d9b318543..c4846b8b308 100644 --- a/deploy/kubernetes/elastic-agent-standalone/elastic-agent-standalone-daemonset.yaml +++ b/deploy/kubernetes/elastic-agent-standalone/elastic-agent-standalone-daemonset.yaml @@ -1,4 +1,4 @@ -# For more information refer to https://www.elastic.co/guide/en/fleet/current/running-on-kubernetes-standalone.html +# For more information refer 
https://www.elastic.co/guide/en/fleet/current/running-on-kubernetes-standalone.html apiVersion: apps/v1 kind: DaemonSet metadata: From 2522be42faa5f3ec8b562ba7a79b8646d4b160c8 Mon Sep 17 00:00:00 2001 From: Pier-Hugues Pellerin Date: Wed, 31 Aug 2022 10:49:29 -0400 Subject: [PATCH 101/180] Fix lookup issues with inputs.d fragment yml (#840) * Fix lookup issues with inputs.d fragment yml The Elastic Agent was looking next to the binary for the `inputs.d` folder; instead, it should look in the `Home` folder where the Elastic Agent symlink is located. Fixes: #663 * Changelog * Fix inputs.d path, tie to the agent Config() directory * Update CHANGELOG to reflect that the agent configuration directory is used to locate the inputs.d directory Co-authored-by: Aleksandr Maus --- CHANGELOG.next.asciidoc | 1 + internal/pkg/agent/application/local_mode.go | 2 +- internal/pkg/agent/application/paths/files.go | 8 ++++++++ internal/pkg/agent/configuration/settings.go | 5 ----- 4 files changed, 10 insertions(+), 6 deletions(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 6f7715bdebf..f7793f7b3a9 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -112,6 +112,7 @@ - Allow the / char in variable names in eql and transpiler. {issue}715[715] {pull}718[718] - Fix data duplication for standalone agent on Kubernetes using the default manifest {issue-beats}31512[31512] {pull}742[742] - Agent updates will clean up unneeded artifacts. {issue}693[693] {issue}694[694] {pull}752[752] +- Use the Elastic Agent configuration directory as the root of the `inputs.d` folder. {issues}663[663] - Fix a panic caused by a race condition when installing the Elastic Agent. {issues}806[806] ==== New features diff --git a/internal/pkg/agent/application/local_mode.go b/internal/pkg/agent/application/local_mode.go index e6496b44860..aae202b114f 100644 --- a/internal/pkg/agent/application/local_mode.go +++ b/internal/pkg/agent/application/local_mode.go @@ -173,7 +173,7 @@ func newLocal( } func externalConfigsGlob() string { - return filepath.Join(paths.Config(), configuration.ExternalInputsPattern) + return filepath.Join(paths.AgentInputsDPath(), "*.yml") } // Routes returns a list of routes handled by agent. diff --git a/internal/pkg/agent/application/paths/files.go b/internal/pkg/agent/application/paths/files.go index 7d35549e840..e6a1bf2eda1 100644 --- a/internal/pkg/agent/application/paths/files.go +++ b/internal/pkg/agent/application/paths/files.go @@ -32,6 +32,9 @@ const defaultAgentStateStoreYmlFile = "state.yml" // defaultAgentStateStoreFile is the file that will contain the action that can be replayed after restart encrypted. const defaultAgentStateStoreFile = "state.enc" +// defaultInputsDPath is the name of the inputs.d directory. +const defaultInputsDPath = "inputs.d" + // AgentConfigYmlFile is a name of file used to store agent information func AgentConfigYmlFile() string { return filepath.Join(Config(), defaultAgentFleetYmlFile) @@ -82,3 +85,8 @@ func AgentStateStoreYmlFile() string { func AgentStateStoreFile() string { return filepath.Join(Home(), defaultAgentStateStoreFile) } + +// AgentInputsDPath is the directory that contains the inputs.d YAML fragments for Kubernetes deployments.
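+// For example, assuming Config() resolves to /etc/elastic-agent (an illustrative path, not a guaranteed default), a fragment dropped at /etc/elastic-agent/inputs.d/nginx.yml is matched by the inputs.d/*.yml pattern that externalConfigsGlob() in local_mode.go builds from this path.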
+func AgentInputsDPath() string { + return filepath.Join(Config(), defaultInputsDPath) +} diff --git a/internal/pkg/agent/configuration/settings.go b/internal/pkg/agent/configuration/settings.go index 93ef491670f..1531c6f1c95 100644 --- a/internal/pkg/agent/configuration/settings.go +++ b/internal/pkg/agent/configuration/settings.go @@ -5,8 +5,6 @@ package configuration import ( - "path/filepath" - "github.com/elastic/elastic-agent/internal/pkg/artifact" monitoringCfg "github.com/elastic/elastic-agent/internal/pkg/core/monitoring/config" "github.com/elastic/elastic-agent/internal/pkg/core/process" @@ -15,9 +13,6 @@ import ( "github.com/elastic/elastic-agent/pkg/core/server" ) -// ExternalInputsPattern is a glob that matches the paths of external configuration files. -var ExternalInputsPattern = filepath.Join("inputs.d", "*.yml") - // SettingsConfig is an collection of agent settings configuration. type SettingsConfig struct { DownloadConfig *artifact.Config `yaml:"download" config:"download" json:"download"` From 54f36975f7c4df7fd5c095b92bc808b8c3f6a88e Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Thu, 1 Sep 2022 01:35:32 -0400 Subject: [PATCH 102/180] [Automation] Update elastic stack version to 8.5.0-b5001a6d for testing (#1064) Co-authored-by: apmmachine --- testing/environments/snapshot.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/testing/environments/snapshot.yml b/testing/environments/snapshot.yml index 9e60e96183b..eb25dd6f178 100644 --- a/testing/environments/snapshot.yml +++ b/testing/environments/snapshot.yml @@ -3,7 +3,7 @@ version: '2.3' services: elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:8.5.0-fedc3e60-SNAPSHOT + image: docker.elastic.co/elasticsearch/elasticsearch:8.5.0-b5001a6d-SNAPSHOT # When extend is used it merges healthcheck.tests, see: # https://github.com/docker/compose/issues/8962 # healthcheck: @@ -42,7 +42,7 @@ services: - ./docker/logstash/pki:/etc/pki:ro kibana: - image: docker.elastic.co/kibana/kibana:8.5.0-fedc3e60-SNAPSHOT + image: docker.elastic.co/kibana/kibana:8.5.0-b5001a6d-SNAPSHOT environment: - "ELASTICSEARCH_USERNAME=kibana_system_user" - "ELASTICSEARCH_PASSWORD=testing" From a0957c71936ba514ebf76a939618d2ea639af14f Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Mon, 5 Sep 2022 01:35:09 -0400 Subject: [PATCH 103/180] [Automation] Update elastic stack version to 8.5.0-1bd77fc1 for testing (#1082) Co-authored-by: apmmachine --- testing/environments/snapshot.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/testing/environments/snapshot.yml b/testing/environments/snapshot.yml index eb25dd6f178..8fd2bf44924 100644 --- a/testing/environments/snapshot.yml +++ b/testing/environments/snapshot.yml @@ -3,7 +3,7 @@ version: '2.3' services: elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:8.5.0-b5001a6d-SNAPSHOT + image: docker.elastic.co/elasticsearch/elasticsearch:8.5.0-1bd77fc1-SNAPSHOT # When extend is used it merges healthcheck.tests, see: # https://github.com/docker/compose/issues/8962 # healthcheck: @@ -42,7 +42,7 @@ services: - ./docker/logstash/pki:/etc/pki:ro kibana: - image: docker.elastic.co/kibana/kibana:8.5.0-b5001a6d-SNAPSHOT + image: docker.elastic.co/kibana/kibana:8.5.0-1bd77fc1-SNAPSHOT environment: - "ELASTICSEARCH_USERNAME=kibana_system_user" - "ELASTICSEARCH_PASSWORD=testing" From 
25411d29572adebc67e4fa3fdd970cb218aa4e0c Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Tue, 6 Sep 2022 01:39:11 -0400 Subject: [PATCH 104/180] [Automation] Update elastic stack version to 8.5.0-167dfc80 for testing (#1091) Co-authored-by: apmmachine --- testing/environments/snapshot.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/testing/environments/snapshot.yml b/testing/environments/snapshot.yml index 8fd2bf44924..0b8c45be2a3 100644 --- a/testing/environments/snapshot.yml +++ b/testing/environments/snapshot.yml @@ -3,7 +3,7 @@ version: '2.3' services: elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:8.5.0-1bd77fc1-SNAPSHOT + image: docker.elastic.co/elasticsearch/elasticsearch:8.5.0-167dfc80-SNAPSHOT # When extend is used it merges healthcheck.tests, see: # https://github.com/docker/compose/issues/8962 # healthcheck: @@ -42,7 +42,7 @@ services: - ./docker/logstash/pki:/etc/pki:ro kibana: - image: docker.elastic.co/kibana/kibana:8.5.0-1bd77fc1-SNAPSHOT + image: docker.elastic.co/kibana/kibana:8.5.0-167dfc80-SNAPSHOT environment: - "ELASTICSEARCH_USERNAME=kibana_system_user" - "ELASTICSEARCH_PASSWORD=testing" From ffc6c00b6ba761261d89120af57b2b90146909cd Mon Sep 17 00:00:00 2001 From: Andrew Gizas Date: Tue, 6 Sep 2022 14:33:55 +0300 Subject: [PATCH 105/180] Adding support for v1.25.0 k8s (#1044) * Adding support for v1.25.0 k8s --- .ci/Jenkinsfile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.ci/Jenkinsfile b/.ci/Jenkinsfile index ab8f17167cb..ccca5538652 100644 --- a/.ci/Jenkinsfile +++ b/.ci/Jenkinsfile @@ -152,7 +152,7 @@ pipeline { } } steps { - runK8s(k8sVersion: 'v1.23.0', kindVersion: 'v0.11.1', context: "K8s-${PLATFORM}") + runK8s(k8sVersion: 'v1.25.0-beta.0', kindVersion: 'v0.14.0', context: "K8s-${PLATFORM}") } } stage('Package') { @@ -224,7 +224,7 @@ pipeline { axes { axis { name 'K8S_VERSION' - values "v1.24.0", "v1.23.6", "v1.22.9", "v1.21.12" + values "v1.25.0","v1.24.3", "v1.23.6", "v1.22.9" } } stages { From 5ca0ae1c94ac35e5d458b32ec0ad715e9f08a83f Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Wed, 7 Sep 2022 01:37:00 -0400 Subject: [PATCH 106/180] [Automation] Update elastic stack version to 8.5.0-6b7dda2d for testing (#1101) Co-authored-by: apmmachine --- testing/environments/snapshot.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/testing/environments/snapshot.yml b/testing/environments/snapshot.yml index 0b8c45be2a3..35fb9660d28 100644 --- a/testing/environments/snapshot.yml +++ b/testing/environments/snapshot.yml @@ -3,7 +3,7 @@ version: '2.3' services: elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:8.5.0-167dfc80-SNAPSHOT + image: docker.elastic.co/elasticsearch/elasticsearch:8.5.0-6b7dda2d-SNAPSHOT # When extend is used it merges healthcheck.tests, see: # https://github.com/docker/compose/issues/8962 # healthcheck: @@ -42,7 +42,7 @@ services: - ./docker/logstash/pki:/etc/pki:ro kibana: - image: docker.elastic.co/kibana/kibana:8.5.0-167dfc80-SNAPSHOT + image: docker.elastic.co/kibana/kibana:8.5.0-6b7dda2d-SNAPSHOT environment: - "ELASTICSEARCH_USERNAME=kibana_system_user" - "ELASTICSEARCH_PASSWORD=testing" From 7a8aeb481c21159e66a137fd9f66655c2a80f0cd Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Thu, 8 Sep 2022 01:38:16 -0400 Subject: [PATCH 107/180] [Automation] Update elastic stack 
version to 8.5.0-4140365c for testing (#1114) Co-authored-by: apmmachine --- testing/environments/snapshot.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/testing/environments/snapshot.yml b/testing/environments/snapshot.yml index 35fb9660d28..e3e0ef97547 100644 --- a/testing/environments/snapshot.yml +++ b/testing/environments/snapshot.yml @@ -3,7 +3,7 @@ version: '2.3' services: elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:8.5.0-6b7dda2d-SNAPSHOT + image: docker.elastic.co/elasticsearch/elasticsearch:8.5.0-4140365c-SNAPSHOT # When extend is used it merges healthcheck.tests, see: # https://github.com/docker/compose/issues/8962 # healthcheck: @@ -42,7 +42,7 @@ services: - ./docker/logstash/pki:/etc/pki:ro kibana: - image: docker.elastic.co/kibana/kibana:8.5.0-6b7dda2d-SNAPSHOT + image: docker.elastic.co/kibana/kibana:8.5.0-4140365c-SNAPSHOT environment: - "ELASTICSEARCH_USERNAME=kibana_system_user" - "ELASTICSEARCH_PASSWORD=testing" From 3951c33a199ef210796bd6a48d4719152d5f739b Mon Sep 17 00:00:00 2001 From: Josh Dover <1813008+joshdover@users.noreply.github.com> Date: Thu, 8 Sep 2022 16:30:22 +0200 Subject: [PATCH 108/180] Remove experimental warning log in upgrade command (#1106) --- internal/pkg/agent/cmd/upgrade.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/internal/pkg/agent/cmd/upgrade.go b/internal/pkg/agent/cmd/upgrade.go index 83128b970e8..5e5d75aeeba 100644 --- a/internal/pkg/agent/cmd/upgrade.go +++ b/internal/pkg/agent/cmd/upgrade.go @@ -36,8 +36,6 @@ func newUpgradeCommandWithArgs(_ []string, streams *cli.IOStreams) *cobra.Comman } func upgradeCmd(streams *cli.IOStreams, cmd *cobra.Command, args []string) error { - fmt.Fprintln(streams.Out, "The upgrade process of Elastic Agent is currently EXPERIMENTAL and should not be used in production") - version := args[0] sourceURI, _ := cmd.Flags().GetString("source-uri") From 802f27e46d7e4fb26024709280386b11d1b03961 Mon Sep 17 00:00:00 2001 From: Craig MacKenzie Date: Thu, 8 Sep 2022 14:43:26 -0400 Subject: [PATCH 109/180] Update go.mod to Go 1.18, update notice. (#1120) --- NOTICE.txt | 266 ----------------------------------------------------- go.mod | 2 +- go.sum | 11 --- 3 files changed, 1 insertion(+), 278 deletions(-) diff --git a/NOTICE.txt b/NOTICE.txt index 3949233c361..7b0395cd0e0 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -7048,36 +7048,6 @@ IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
--------------------------------------------------------------------------------- -Dependency : github.com/cenkalti/backoff -Version: v2.2.1+incompatible -Licence type (autodetected): MIT --------------------------------------------------------------------------------- - -Contents of probable licence file $GOMODCACHE/github.com/cenkalti/backoff@v2.2.1+incompatible/LICENSE: - -The MIT License (MIT) - -Copyright (c) 2014 Cenk Altı - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - - -------------------------------------------------------------------------------- Dependency : github.com/cenkalti/backoff/v4 Version: v4.1.1 @@ -7309,207 +7279,6 @@ Contents of probable licence file $GOMODCACHE/github.com/containerd/containerd@v limitations under the License. --------------------------------------------------------------------------------- -Dependency : github.com/coreos/go-systemd -Version: v0.0.0-20190321100706-95778dfbb74e -Licence type (autodetected): Apache-2.0 --------------------------------------------------------------------------------- - -Contents of probable licence file $GOMODCACHE/github.com/coreos/go-systemd@v0.0.0-20190321100706-95778dfbb74e/LICENSE: - -Apache License -Version 2.0, January 2004 -http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - -"License" shall mean the terms and conditions for use, reproduction, and -distribution as defined by Sections 1 through 9 of this document. - -"Licensor" shall mean the copyright owner or entity authorized by the copyright -owner that is granting the License. - -"Legal Entity" shall mean the union of the acting entity and all other entities -that control, are controlled by, or are under common control with that entity. -For the purposes of this definition, "control" means (i) the power, direct or -indirect, to cause the direction or management of such entity, whether by -contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the -outstanding shares, or (iii) beneficial ownership of such entity. - -"You" (or "Your") shall mean an individual or Legal Entity exercising -permissions granted by this License. - -"Source" form shall mean the preferred form for making modifications, including -but not limited to software source code, documentation source, and configuration -files. 
- -"Object" form shall mean any form resulting from mechanical transformation or -translation of a Source form, including but not limited to compiled object code, -generated documentation, and conversions to other media types. - -"Work" shall mean the work of authorship, whether in Source or Object form, made -available under the License, as indicated by a copyright notice that is included -in or attached to the work (an example is provided in the Appendix below). - -"Derivative Works" shall mean any work, whether in Source or Object form, that -is based on (or derived from) the Work and for which the editorial revisions, -annotations, elaborations, or other modifications represent, as a whole, an -original work of authorship. For the purposes of this License, Derivative Works -shall not include works that remain separable from, or merely link (or bind by -name) to the interfaces of, the Work and Derivative Works thereof. - -"Contribution" shall mean any work of authorship, including the original version -of the Work and any modifications or additions to that Work or Derivative Works -thereof, that is intentionally submitted to Licensor for inclusion in the Work -by the copyright owner or by an individual or Legal Entity authorized to submit -on behalf of the copyright owner. For the purposes of this definition, -"submitted" means any form of electronic, verbal, or written communication sent -to the Licensor or its representatives, including but not limited to -communication on electronic mailing lists, source code control systems, and -issue tracking systems that are managed by, or on behalf of, the Licensor for -the purpose of discussing and improving the Work, but excluding communication -that is conspicuously marked or otherwise designated in writing by the copyright -owner as "Not a Contribution." - -"Contributor" shall mean Licensor and any individual or Legal Entity on behalf -of whom a Contribution has been received by Licensor and subsequently -incorporated within the Work. - -2. Grant of Copyright License. - -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable copyright license to reproduce, prepare Derivative Works of, -publicly display, publicly perform, sublicense, and distribute the Work and such -Derivative Works in Source or Object form. - -3. Grant of Patent License. - -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable (except as stated in this section) patent license to make, have -made, use, offer to sell, sell, import, and otherwise transfer the Work, where -such license applies only to those patent claims licensable by such Contributor -that are necessarily infringed by their Contribution(s) alone or by combination -of their Contribution(s) with the Work to which such Contribution(s) was -submitted. If You institute patent litigation against any entity (including a -cross-claim or counterclaim in a lawsuit) alleging that the Work or a -Contribution incorporated within the Work constitutes direct or contributory -patent infringement, then any patent licenses granted to You under this License -for that Work shall terminate as of the date such litigation is filed. - -4. Redistribution. 
- -You may reproduce and distribute copies of the Work or Derivative Works thereof -in any medium, with or without modifications, and in Source or Object form, -provided that You meet the following conditions: - -You must give any other recipients of the Work or Derivative Works a copy of -this License; and -You must cause any modified files to carry prominent notices stating that You -changed the files; and -You must retain, in the Source form of any Derivative Works that You distribute, -all copyright, patent, trademark, and attribution notices from the Source form -of the Work, excluding those notices that do not pertain to any part of the -Derivative Works; and -If the Work includes a "NOTICE" text file as part of its distribution, then any -Derivative Works that You distribute must include a readable copy of the -attribution notices contained within such NOTICE file, excluding those notices -that do not pertain to any part of the Derivative Works, in at least one of the -following places: within a NOTICE text file distributed as part of the -Derivative Works; within the Source form or documentation, if provided along -with the Derivative Works; or, within a display generated by the Derivative -Works, if and wherever such third-party notices normally appear. The contents of -the NOTICE file are for informational purposes only and do not modify the -License. You may add Your own attribution notices within Derivative Works that -You distribute, alongside or as an addendum to the NOTICE text from the Work, -provided that such additional attribution notices cannot be construed as -modifying the License. -You may add Your own copyright statement to Your modifications and may provide -additional or different license terms and conditions for use, reproduction, or -distribution of Your modifications, or for any such Derivative Works as a whole, -provided Your use, reproduction, and distribution of the Work otherwise complies -with the conditions stated in this License. - -5. Submission of Contributions. - -Unless You explicitly state otherwise, any Contribution intentionally submitted -for inclusion in the Work by You to the Licensor shall be under the terms and -conditions of this License, without any additional terms or conditions. -Notwithstanding the above, nothing herein shall supersede or modify the terms of -any separate license agreement you may have executed with Licensor regarding -such Contributions. - -6. Trademarks. - -This License does not grant permission to use the trade names, trademarks, -service marks, or product names of the Licensor, except as required for -reasonable and customary use in describing the origin of the Work and -reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. - -Unless required by applicable law or agreed to in writing, Licensor provides the -Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, -including, without limitation, any warranties or conditions of TITLE, -NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are -solely responsible for determining the appropriateness of using or -redistributing the Work and assume any risks associated with Your exercise of -permissions under this License. - -8. Limitation of Liability. 
- -In no event and under no legal theory, whether in tort (including negligence), -contract, or otherwise, unless required by applicable law (such as deliberate -and grossly negligent acts) or agreed to in writing, shall any Contributor be -liable to You for damages, including any direct, indirect, special, incidental, -or consequential damages of any character arising as a result of this License or -out of the use or inability to use the Work (including but not limited to -damages for loss of goodwill, work stoppage, computer failure or malfunction, or -any and all other commercial damages or losses), even if such Contributor has -been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. - -While redistributing the Work or Derivative Works thereof, You may choose to -offer, and charge a fee for, acceptance of support, warranty, indemnity, or -other liability obligations and/or rights consistent with this License. However, -in accepting such obligations, You may act only on Your own behalf and on Your -sole responsibility, not on behalf of any other Contributor, and only if You -agree to indemnify, defend, and hold each Contributor harmless for any liability -incurred by, or claims asserted against, such Contributor by reason of your -accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work - -To apply the Apache License to your work, attach the following boilerplate -notice, with the fields enclosed by brackets "[]" replaced with your own -identifying information. (Don't include the brackets!) The text should be -enclosed in the appropriate comment syntax for the file format. We also -recommend that a file or class name and description of purpose be included on -the same "printed page" as the copyright notice for easier identification within -third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - - -------------------------------------------------------------------------------- Dependency : github.com/cyphar/filepath-securejoin Version: v0.2.3 @@ -9272,41 +9041,6 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. --------------------------------------------------------------------------------- -Dependency : github.com/godbus/dbus -Version: v0.0.0-20190422162347-ade71ed3457e -Licence type (autodetected): BSD-2-Clause --------------------------------------------------------------------------------- - -Contents of probable licence file $GOMODCACHE/github.com/godbus/dbus@v0.0.0-20190422162347-ade71ed3457e/LICENSE: - -Copyright (c) 2013, Georg Reinke (), Google -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions -are met: - -1. Redistributions of source code must retain the above copyright notice, -this list of conditions and the following disclaimer. 
- -2. Redistributions in binary form must reproduce the above copyright -notice, this list of conditions and the following disclaimer in the -documentation and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED -TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF -LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING -NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - -------------------------------------------------------------------------------- Dependency : github.com/godbus/dbus/v5 Version: v5.0.5 diff --git a/go.mod b/go.mod index dd1d29345e3..9f0dfb21464 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/elastic/elastic-agent -go 1.17 +go 1.18 require ( github.com/Microsoft/go-winio v0.5.2 diff --git a/go.sum b/go.sum index 4b63a456695..22059e4c0b3 100644 --- a/go.sum +++ b/go.sum @@ -92,7 +92,6 @@ github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2 github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00= github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600= github.com/Microsoft/hcsshim v0.8.21/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4= -github.com/Microsoft/hcsshim v0.8.24/go.mod h1:4zegtUJth7lAvFyc6cH2gGQ5B3OFQim01nnU2M8jKDg= github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU= github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= @@ -175,7 +174,6 @@ github.com/cavaliercoder/badio v0.0.0-20160213150051-ce5280129e9e h1:YYUjy5BRwO5 github.com/cavaliercoder/badio v0.0.0-20160213150051-ce5280129e9e/go.mod h1:V284PjgVwSk4ETmz84rpu9ehpGg7swlIH8npP9k2bGw= github.com/cavaliercoder/go-rpm v0.0.0-20190131055624-7a9c54e3d83e h1:Gbx+iVCXG/1m5WSnidDGuHgN+vbIwl+6fR092ANU+Y8= github.com/cavaliercoder/go-rpm v0.0.0-20190131055624-7a9c54e3d83e/go.mod h1:AZIh1CCnMrcVm6afFf96PBvE2MRpWFco91z8ObJtgDY= -github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/cenkalti/backoff/v4 v4.1.0/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/cenkalti/backoff/v4 v4.1.1 h1:G2HAfAmvm/GcKan2oOQpBXOd2tT2G57ZnZGWa1PxPBQ= @@ -229,7 +227,6 @@ github.com/containerd/cgroups v0.0.0-20200710171044-318312a37340/go.mod h1:s5q4S github.com/containerd/cgroups v0.0.0-20200824123100-0b889c03f102/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo= github.com/containerd/cgroups 
v0.0.0-20210114181951-8a68de567b68/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE= github.com/containerd/cgroups v1.0.1/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f25ZfBiPYRkU= -github.com/containerd/cgroups v1.0.3/go.mod h1:/ofk34relqNjSGyqPrmEULrO4Sc8LJhvJmWbUCUKqj8= github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE= @@ -243,7 +240,6 @@ github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMX github.com/containerd/containerd v1.4.0-beta.2.0.20200729163537-40b22ef07410/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/containerd v1.4.1/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/containerd v1.4.3/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/containerd v1.4.9/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/containerd v1.5.0-beta.1/go.mod h1:5HfvG1V2FsKesEGQ17k5/T7V960Tmcumvqn8Mc+pCYQ= github.com/containerd/containerd v1.5.0-beta.3/go.mod h1:/wr9AVtEM7x9c+n0+stptlo/uBBoBORwEx6ardVcmKU= github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09ZvgqEq8EfBp/m3lcVZIvPHhI= @@ -285,7 +281,6 @@ github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDG github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8= github.com/containerd/ttrpc v1.0.1/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y= github.com/containerd/ttrpc v1.0.2/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y= -github.com/containerd/ttrpc v1.1.0/go.mod h1:XX4ZTnoOId4HklF4edwc4DcqskFZuvXB1Evzy5KFQpQ= github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc= github.com/containerd/typeurl v0.0.0-20190911142611-5eb25027c9fd/go.mod h1:GeKYzf2pQcqv7tJ0AoCuuhtnqhva5LNU3U+OyKxxJpk= github.com/containerd/typeurl v1.0.1/go.mod h1:TB1hUtrpaiO88KEK56ijojHS1+NeF0izUACaJW2mdXg= @@ -313,7 +308,6 @@ github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3Ee github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd v0.0.0-20161114122254-48702e0da86b/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e h1:Wf6HqHfScWJN9/ZjdUKyjop4mf3Qdd+1TvvltAvM3m8= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= @@ -386,7 +380,6 @@ github.com/elastic/elastic-agent-autodiscover v0.0.0-20220404145827-89887023c1ab github.com/elastic/elastic-agent-client/v7 
v7.0.0-20210727140539-f0905d9377f6 h1:nFvXHBjYK3e9+xF0WKDeAKK4aOO51uC28s+L9rBmilo= github.com/elastic/elastic-agent-client/v7 v7.0.0-20210727140539-f0905d9377f6/go.mod h1:uh/Gj9a0XEbYoM4NYz4LvaBVARz3QXLmlNjsrKY9fTc= github.com/elastic/elastic-agent-libs v0.0.0-20220303160015-5b4e674da3dd/go.mod h1://82M1l73IHx0wDbS2Tzkq6Fx9fkmytS1KgkIyzvNTM= -github.com/elastic/elastic-agent-libs v0.2.2/go.mod h1:1xDLBhIqBIjhJ7lr2s+xRFFkQHpitSp8q2zzv1Dqg+s= github.com/elastic/elastic-agent-libs v0.2.6 h1:DpcUcCVYZ7lNtHLUlyT1u/GtGAh49wpL15DTH7+8O5o= github.com/elastic/elastic-agent-libs v0.2.6/go.mod h1:chO3rtcLyGlKi9S0iGVZhYCzDfdDsAQYBc+ui588AFE= github.com/elastic/elastic-agent-system-metrics v0.3.0 h1:W8L0E8lWJmdguH+oIR7OzuFgopvw8ucZAE9w6iqVlpE= @@ -397,7 +390,6 @@ github.com/elastic/go-elasticsearch/v8 v8.0.0-20210317102009-a9d74cec0186/go.mod github.com/elastic/go-licenser v0.3.1/go.mod h1:D8eNQk70FOCVBl3smCGQt/lv7meBeQno2eI1S5apiHQ= github.com/elastic/go-licenser v0.4.0 h1:jLq6A5SilDS/Iz1ABRkO6BHy91B9jBora8FwGRsDqUI= github.com/elastic/go-licenser v0.4.0/go.mod h1:V56wHMpmdURfibNBggaSBfqgPxyT1Tldns1i87iTEvU= -github.com/elastic/go-structform v0.0.9/go.mod h1:CZWf9aIRYY5SuKSmOhtXScE5uQiLZNqAFnwKR4OrIM4= github.com/elastic/go-structform v0.0.10 h1:oy08o/Ih2hHTkNcRY/1HhaYvIp5z6t8si8gnCJPDo1w= github.com/elastic/go-structform v0.0.10/go.mod h1:CZWf9aIRYY5SuKSmOhtXScE5uQiLZNqAFnwKR4OrIM4= github.com/elastic/go-sysinfo v1.1.1/go.mod h1:i1ZYdU10oLNfRzq4vq62BEwD2fH8KaWh6eh0ikPT9F0= @@ -524,7 +516,6 @@ github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY9 github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= github.com/godbus/dbus v0.0.0-20151105175453-c7fdd8b5cd55/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= -github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e h1:BWhy2j3IXJhjCbC68FptL43tDKIq8FladmaTs3Xs7Z8= github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= @@ -1406,7 +1397,6 @@ golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211020060615-d418f374d309/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= @@ -1565,7 +1555,6 @@ golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20211205182925-97ca703d548d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys 
v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220405052023-b1e9470b6e64/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220412211240-33da011f77ad h1:ntjMns5wyP/fN65tdBD4g8J5w8n015+iIIs9rtjXkY0= golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= From 6e2e06c06743fde7cb4fbb06934c720d59f9959a Mon Sep 17 00:00:00 2001 From: Michel Laterman <82832767+michel-laterman@users.noreply.github.com> Date: Fri, 9 Sep 2022 06:42:28 -0700 Subject: [PATCH 110/180] Remove the fleet reporter (#1130) * Remove the fleet reporter Remove the fleet-reporter so that checkins no longer deliver the event list. * add CHANGELOG fix tests --- CHANGELOG.next.asciidoc | 1 + .../gateway/fleet/fleet_gateway.go | 15 -- .../gateway/fleet/fleet_gateway_test.go | 54 +--- .../pkg/agent/application/managed_mode.go | 9 +- internal/pkg/agent/configuration/fleet.go | 19 +- internal/pkg/fleetapi/checkin_cmd.go | 7 +- internal/pkg/reporter/fleet/config/config.go | 19 -- internal/pkg/reporter/fleet/reporter.go | 175 ------------- internal/pkg/reporter/fleet/reporter_test.go | 241 ------------------ 9 files changed, 16 insertions(+), 524 deletions(-) delete mode 100644 internal/pkg/reporter/fleet/config/config.go delete mode 100644 internal/pkg/reporter/fleet/reporter.go delete mode 100644 internal/pkg/reporter/fleet/reporter_test.go diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index f7793f7b3a9..48109ba3c0d 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -114,6 +114,7 @@ - Agent updates will clean up unneeded artifacts. {issue}693[693] {issue}694[694] {pull}752[752] - Use the Elastic Agent configuration directory as the root of the `inputs.d` folder. {issues}663[663] - Fix a panic caused by a race condition when installing the Elastic Agent. {issues}806[806] +- Remove fleet event reporter and events from checkin body. 
{issue}993[993] ==== New features diff --git a/internal/pkg/agent/application/gateway/fleet/fleet_gateway.go b/internal/pkg/agent/application/gateway/fleet/fleet_gateway.go index f5c02d3356a..3cce7073e3a 100644 --- a/internal/pkg/agent/application/gateway/fleet/fleet_gateway.go +++ b/internal/pkg/agent/application/gateway/fleet/fleet_gateway.go @@ -56,10 +56,6 @@ type agentInfo interface { AgentID() string } -type fleetReporter interface { - Events() ([]fleetapi.SerializableEvent, func()) -} - type stateStore interface { Add(fleetapi.Action) AckToken() string @@ -85,7 +81,6 @@ type fleetGateway struct { backoff backoff.Backoff settings *fleetGatewaySettings agentInfo agentInfo - reporter fleetReporter done chan struct{} wg sync.WaitGroup acker store.FleetAcker @@ -104,7 +99,6 @@ func New( agentInfo agentInfo, client client.Sender, d pipeline.Dispatcher, - r fleetReporter, acker store.FleetAcker, statusController status.Controller, stateStore stateStore, @@ -120,7 +114,6 @@ func New( client, d, scheduler, - r, acker, statusController, stateStore, @@ -136,7 +129,6 @@ func newFleetGatewayWithScheduler( client client.Sender, d pipeline.Dispatcher, scheduler scheduler.Scheduler, - r fleetReporter, acker store.FleetAcker, statusController status.Controller, stateStore stateStore, @@ -162,7 +154,6 @@ func newFleetGatewayWithScheduler( settings.Backoff.Max, ), done: done, - reporter: r, acker: acker, statusReporter: statusController.RegisterComponent("gateway"), statusController: statusController, @@ -323,9 +314,6 @@ func (f *fleetGateway) doExecute() (*fleetapi.CheckinResponse, error) { } func (f *fleetGateway) execute(ctx context.Context) (*fleetapi.CheckinResponse, error) { - // get events - ee, ack := f.reporter.Events() - ecsMeta, err := info.Metadata() if err != nil { f.log.Error(errors.New("failed to load metadata", err)) @@ -341,7 +329,6 @@ func (f *fleetGateway) execute(ctx context.Context) (*fleetapi.CheckinResponse, cmd := fleetapi.NewCheckinCmd(f.agentInfo, f.client) req := &fleetapi.CheckinRequest{ AckToken: ackToken, - Events: ee, Metadata: ecsMeta, Status: f.statusController.StatusString(), } @@ -374,8 +361,6 @@ func (f *fleetGateway) execute(ctx context.Context) (*fleetapi.CheckinResponse, } } - // ack events so they are dropped from queue - ack() return resp, nil } diff --git a/internal/pkg/agent/application/gateway/fleet/fleet_gateway_test.go b/internal/pkg/agent/application/gateway/fleet/fleet_gateway_test.go index 6ce62448276..b02507e61ad 100644 --- a/internal/pkg/agent/application/gateway/fleet/fleet_gateway_test.go +++ b/internal/pkg/agent/application/gateway/fleet/fleet_gateway_test.go @@ -18,7 +18,6 @@ import ( "testing" "time" - "github.com/pkg/errors" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" @@ -29,9 +28,6 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/core/state" "github.com/elastic/elastic-agent/internal/pkg/fleetapi" noopacker "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker/noop" - repo "github.com/elastic/elastic-agent/internal/pkg/reporter" - fleetreporter "github.com/elastic/elastic-agent/internal/pkg/reporter/fleet" - fleetreporterConfig "github.com/elastic/elastic-agent/internal/pkg/reporter/fleet/config" "github.com/elastic/elastic-agent/internal/pkg/scheduler" "github.com/elastic/elastic-agent/internal/pkg/testutils" "github.com/elastic/elastic-agent/pkg/core/logger" @@ -137,7 +133,7 @@ func (m *mockQueue) 
Actions() []fleetapi.Action { return args.Get(0).([]fleetapi.Action) } -type withGatewayFunc func(*testing.T, gateway.FleetGateway, *testingClient, *testingDispatcher, *scheduler.Stepper, repo.Backend) +type withGatewayFunc func(*testing.T, gateway.FleetGateway, *testingClient, *testingDispatcher, *scheduler.Stepper) func withGateway(agentInfo agentInfo, settings *fleetGatewaySettings, fn withGatewayFunc) func(t *testing.T) { return func(t *testing.T) { @@ -146,8 +142,6 @@ func withGateway(agentInfo agentInfo, settings *fleetGatewaySettings, fn withGat dispatcher := newTestingDispatcher() log, _ := logger.New("fleet_gateway", false) - rep := getReporter(agentInfo, log, t) - ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -167,7 +161,6 @@ func withGateway(agentInfo agentInfo, settings *fleetGatewaySettings, fn withGat client, dispatcher, scheduler, - rep, noopacker.NewAcker(), &noopController{}, stateStore, @@ -176,7 +169,7 @@ func withGateway(agentInfo agentInfo, settings *fleetGatewaySettings, fn withGat require.NoError(t, err) - fn(t, gateway, client, dispatcher, scheduler, rep) + fn(t, gateway, client, dispatcher, scheduler) } } @@ -214,7 +207,6 @@ func TestFleetGateway(t *testing.T) { client *testingClient, dispatcher *testingDispatcher, scheduler *scheduler.Stepper, - rep repo.Backend, ) { waitFn := ackSeq( client.Answer(func(headers http.Header, body io.Reader) (*http.Response, error) { @@ -240,7 +232,6 @@ func TestFleetGateway(t *testing.T) { client *testingClient, dispatcher *testingDispatcher, scheduler *scheduler.Stepper, - rep repo.Backend, ) { waitFn := ackSeq( client.Answer(func(headers http.Header, body io.Reader) (*http.Response, error) { @@ -305,7 +296,6 @@ func TestFleetGateway(t *testing.T) { client, dispatcher, scheduler, - getReporter(agentInfo, log, t), noopacker.NewAcker(), &noopController{}, stateStore, @@ -366,7 +356,6 @@ func TestFleetGateway(t *testing.T) { client, dispatcher, scheduler, - getReporter(agentInfo, log, t), noopacker.NewAcker(), &noopController{}, stateStore, @@ -432,7 +421,6 @@ func TestFleetGateway(t *testing.T) { client, dispatcher, scheduler, - getReporter(agentInfo, log, t), noopacker.NewAcker(), &noopController{}, stateStore, @@ -487,7 +475,6 @@ func TestFleetGateway(t *testing.T) { client, dispatcher, scheduler, - getReporter(agentInfo, log, t), noopacker.NewAcker(), &noopController{}, stateStore, @@ -544,7 +531,6 @@ func TestFleetGateway(t *testing.T) { client, dispatcher, scheduler, - getReporter(agentInfo, log, t), noopacker.NewAcker(), &noopController{}, stateStore, @@ -594,9 +580,7 @@ func TestFleetGateway(t *testing.T) { client *testingClient, dispatcher *testingDispatcher, scheduler *scheduler.Stepper, - rep repo.Backend, ) { - _ = rep.Report(context.Background(), &testStateEvent{}) waitFn := ackSeq( client.Answer(func(headers http.Header, body io.Reader) (*http.Response, error) { cr := &request{} @@ -609,8 +593,6 @@ func TestFleetGateway(t *testing.T) { t.Fatal(err) } - require.Equal(t, 1, len(cr.Events)) - resp := wrapStrToResp(http.StatusOK, `{ "actions": [] }`) return resp, nil }), @@ -657,7 +639,6 @@ func TestFleetGateway(t *testing.T) { client, dispatcher, scheduler, - getReporter(agentInfo, log, t), noopacker.NewAcker(), &noopController{}, stateStore, @@ -712,8 +693,6 @@ func TestRetriesOnFailures(t *testing.T) { client := newTestingClient() dispatcher := newTestingDispatcher() log, _ := logger.New("fleet_gateway", false) - rep := getReporter(agentInfo, log, t) - ctx, cancel := 
context.WithCancel(context.Background()) defer cancel() @@ -742,7 +721,6 @@ func TestRetriesOnFailures(t *testing.T) { client, dispatcher, scheduler, - rep, noopacker.NewAcker(), statusController, stateStore, @@ -757,8 +735,6 @@ func TestRetriesOnFailures(t *testing.T) { err = gateway.Start() require.NoError(t, err) - _ = rep.Report(context.Background(), &testStateEvent{}) - // Initial tick is done out of bound so we can block on channels. scheduler.Next() @@ -780,8 +756,6 @@ func TestRetriesOnFailures(t *testing.T) { t.Fatal(err) } - require.Equal(t, 1, len(cr.Events)) - resp := wrapStrToResp(http.StatusOK, `{ "actions": [] }`) return resp, nil }), @@ -807,7 +781,6 @@ func TestRetriesOnFailures(t *testing.T) { client *testingClient, dispatcher *testingDispatcher, scheduler *scheduler.Stepper, - rep repo.Backend, ) { fail := func(_ http.Header, _ io.Reader) (*http.Response, error) { return wrapStrToResp(http.StatusInternalServerError, "something is bad"), nil @@ -816,8 +789,6 @@ func TestRetriesOnFailures(t *testing.T) { err := gateway.Start() require.NoError(t, err) - _ = rep.Report(context.Background(), &testStateEvent{}) - // Initial tick is done out of bound so we can block on channels. scheduler.Next() @@ -830,27 +801,8 @@ func TestRetriesOnFailures(t *testing.T) { })) } -func getReporter(info agentInfo, log *logger.Logger, t *testing.T) *fleetreporter.Reporter { - fleetR, err := fleetreporter.NewReporter(info, log, fleetreporterConfig.DefaultConfig()) - if err != nil { - t.Fatal(errors.Wrap(err, "fail to create reporters")) - } - - return fleetR -} - type testAgentInfo struct{} func (testAgentInfo) AgentID() string { return "agent-secret" } -type testStateEvent struct{} - -func (testStateEvent) Type() string { return repo.EventTypeState } -func (testStateEvent) SubType() string { return repo.EventSubTypeInProgress } -func (testStateEvent) Time() time.Time { return time.Unix(0, 1) } -func (testStateEvent) Message() string { return "hello" } -func (testStateEvent) Payload() map[string]interface{} { return map[string]interface{}{"key": 1} } - -type request struct { - Events []interface{} `json:"events"` -} +type request struct{} diff --git a/internal/pkg/agent/application/managed_mode.go b/internal/pkg/agent/application/managed_mode.go index 08c43aeeca3..037cf74ad5c 100644 --- a/internal/pkg/agent/application/managed_mode.go +++ b/internal/pkg/agent/application/managed_mode.go @@ -44,7 +44,6 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/fleetapi/client" "github.com/elastic/elastic-agent/internal/pkg/queue" reporting "github.com/elastic/elastic-agent/internal/pkg/reporter" - fleetreporter "github.com/elastic/elastic-agent/internal/pkg/reporter/fleet" logreporter "github.com/elastic/elastic-agent/internal/pkg/reporter/log" "github.com/elastic/elastic-agent/internal/pkg/sorted" "github.com/elastic/elastic-agent/pkg/core/logger" @@ -124,12 +123,7 @@ func newManaged( } logR := logreporter.NewReporter(log) - fleetR, err := fleetreporter.NewReporter(agentInfo, log, cfg.Fleet.Reporting) - if err != nil { - return nil, errors.New(err, "fail to create reporters") - } - - combinedReporter := reporting.NewReporter(managedApplication.bgContext, log, agentInfo, logR, fleetR) + combinedReporter := reporting.NewReporter(managedApplication.bgContext, log, agentInfo, logR) monitor, err := monitoring.NewMonitor(cfg.Settings) if err != nil { return nil, errors.New(err, "failed to initialize monitoring") @@ -288,7 +282,6 @@ func 
newManaged( agentInfo, client, actionDispatcher, - fleetR, actionAcker, statusCtrl, stateStore, diff --git a/internal/pkg/agent/configuration/fleet.go b/internal/pkg/agent/configuration/fleet.go index 5bc9c115a63..0ae59c8f4e8 100644 --- a/internal/pkg/agent/configuration/fleet.go +++ b/internal/pkg/agent/configuration/fleet.go @@ -7,18 +7,16 @@ package configuration import ( "github.com/elastic/elastic-agent/internal/pkg/agent/errors" "github.com/elastic/elastic-agent/internal/pkg/remote" - fleetreporterConfig "github.com/elastic/elastic-agent/internal/pkg/reporter/fleet/config" ) // FleetAgentConfig is the internal configuration of the agent after the enrollment is done, // this configuration is not exposed in anyway in the elastic-agent.yml and is only internal configuration. type FleetAgentConfig struct { - Enabled bool `config:"enabled" yaml:"enabled"` - AccessAPIKey string `config:"access_api_key" yaml:"access_api_key"` - Client remote.Config `config:",inline" yaml:",inline"` - Reporting *fleetreporterConfig.Config `config:"reporting" yaml:"reporting"` - Info *AgentInfo `config:"agent" yaml:"agent"` - Server *FleetServerConfig `config:"server" yaml:"server,omitempty"` + Enabled bool `config:"enabled" yaml:"enabled"` + AccessAPIKey string `config:"access_api_key" yaml:"access_api_key"` + Client remote.Config `config:",inline" yaml:",inline"` + Info *AgentInfo `config:"agent" yaml:"agent"` + Server *FleetServerConfig `config:"server" yaml:"server,omitempty"` } // Valid validates the required fields for accessing the API. @@ -44,9 +42,8 @@ func (e *FleetAgentConfig) Valid() error { // DefaultFleetAgentConfig creates a default configuration for fleet. func DefaultFleetAgentConfig() *FleetAgentConfig { return &FleetAgentConfig{ - Enabled: false, - Client: remote.DefaultClientConfig(), - Reporting: fleetreporterConfig.DefaultConfig(), - Info: &AgentInfo{}, + Enabled: false, + Client: remote.DefaultClientConfig(), + Info: &AgentInfo{}, } } diff --git a/internal/pkg/fleetapi/checkin_cmd.go b/internal/pkg/fleetapi/checkin_cmd.go index 47a76ea47e7..e225aababb9 100644 --- a/internal/pkg/fleetapi/checkin_cmd.go +++ b/internal/pkg/fleetapi/checkin_cmd.go @@ -22,10 +22,9 @@ const checkingPath = "/api/fleet/agents/%s/checkin" // CheckinRequest consists of multiple events reported to fleet ui. type CheckinRequest struct { - Status string `json:"status"` - AckToken string `json:"ack_token,omitempty"` - Events []SerializableEvent `json:"events"` - Metadata *info.ECSMeta `json:"local_metadata,omitempty"` + Status string `json:"status"` + AckToken string `json:"ack_token,omitempty"` + Metadata *info.ECSMeta `json:"local_metadata,omitempty"` } // SerializableEvent is a representation of the event to be send to the Fleet Server API via the checkin diff --git a/internal/pkg/reporter/fleet/config/config.go b/internal/pkg/reporter/fleet/config/config.go deleted file mode 100644 index 1e42b956ee8..00000000000 --- a/internal/pkg/reporter/fleet/config/config.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. 
- -package config - -// Config is a configuration describing fleet connected parts -type Config struct { - Threshold int `yaml:"threshold" config:"threshold" validate:"min=1"` - ReportingCheckFrequency int `yaml:"check_frequency_sec" config:"check_frequency_sec" validate:"min=1"` -} - -// DefaultConfig initiates FleetManagementConfig with default values -func DefaultConfig() *Config { - return &Config{ - Threshold: 10000, - ReportingCheckFrequency: 30, - } -} diff --git a/internal/pkg/reporter/fleet/reporter.go b/internal/pkg/reporter/fleet/reporter.go deleted file mode 100644 index edf5008bc01..00000000000 --- a/internal/pkg/reporter/fleet/reporter.go +++ /dev/null @@ -1,175 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package fleet - -import ( - "context" - "sync" - "time" - - "github.com/elastic/elastic-agent/internal/pkg/fleetapi" - "github.com/elastic/elastic-agent/internal/pkg/reporter" - "github.com/elastic/elastic-agent/internal/pkg/reporter/fleet/config" - "github.com/elastic/elastic-agent/pkg/core/logger" -) - -type event struct { - AgentID string `json:"agent_id"` - EventType string `json:"type"` - TS fleetapi.Time `json:"timestamp"` - SubType string `json:"subtype"` - Msg string `json:"message"` - Payload map[string]interface{} `json:"payload,omitempty"` -} - -func (e *event) Type() string { - return e.EventType -} - -func (e *event) Timestamp() time.Time { - return time.Time(e.TS) -} - -func (e *event) Message() string { - return e.Msg -} - -// Reporter is a reporter without any effects, serves just as a showcase for further implementations. -type Reporter struct { - lastAck time.Time - info agentInfo - logger *logger.Logger - queue []fleetapi.SerializableEvent - threshold int - qlock sync.Mutex -} - -type agentInfo interface { - AgentID() string -} - -// NewReporter creates a new fleet reporter. -func NewReporter(agentInfo agentInfo, l *logger.Logger, c *config.Config) (*Reporter, error) { - r := &Reporter{ - info: agentInfo, - queue: make([]fleetapi.SerializableEvent, 0), - logger: l, - threshold: c.Threshold, - } - - return r, nil -} - -// Report enqueue event into reporter queue. -func (r *Reporter) Report(ctx context.Context, e reporter.Event) error { - r.qlock.Lock() - defer r.qlock.Unlock() - - r.queue = append(r.queue, &event{ - AgentID: r.info.AgentID(), - EventType: e.Type(), - TS: fleetapi.Time(e.Time()), - SubType: e.SubType(), - Msg: e.Message(), - Payload: e.Payload(), - }) - - if r.threshold > 0 && len(r.queue) > r.threshold { - // drop some low importance event if needed - r.dropEvent() - } - - return nil -} - -// Events returns a list of event from a queue and a ack function -// which clears those events once caller is done with processing. -func (r *Reporter) Events() ([]fleetapi.SerializableEvent, func()) { - r.qlock.Lock() - defer r.qlock.Unlock() - - cp := r.queueCopy() - - ackFn := func() { - // as time is monotonic and this is on single machine this should be ok. 
- r.clear(cp, time.Now()) - } - - return cp, ackFn -} - -func (r *Reporter) clear(items []fleetapi.SerializableEvent, ackTime time.Time) { - r.qlock.Lock() - defer r.qlock.Unlock() - - if ackTime.Sub(r.lastAck) <= 0 || - len(r.queue) == 0 || - items == nil || - len(items) == 0 { - return - } - - var dropIdx int - r.lastAck = ackTime - itemsLen := len(items) - -OUTER: - for idx := itemsLen - 1; idx >= 0; idx-- { - for i, v := range r.queue { - if v == items[idx] { - dropIdx = i - break OUTER - } - } - } - - r.queue = r.queue[dropIdx+1:] -} - -// Close stops all the background jobs reporter is running. -// Guards against panic of closing channel multiple times. -func (r *Reporter) Close() error { - return nil -} - -func (r *Reporter) queueCopy() []fleetapi.SerializableEvent { - size := len(r.queue) - batch := make([]fleetapi.SerializableEvent, size) - - copy(batch, r.queue) - return batch -} - -func (r *Reporter) dropEvent() { - if dropped := r.tryDropInfo(); !dropped { - r.dropFirst() - } -} - -// tryDropInfo returns true if info was found and dropped. -func (r *Reporter) tryDropInfo() bool { - for i, e := range r.queue { - if e.Type() != reporter.EventTypeError { - r.queue = append(r.queue[:i], r.queue[i+1:]...) - r.logger.Infof("fleet reporter dropped event because threshold[%d] was reached: %v", r.threshold, e) - return true - } - } - - return false -} - -func (r *Reporter) dropFirst() { - if len(r.queue) == 0 { - return - } - - first := r.queue[0] - r.logger.Infof("fleet reporter dropped event because threshold[%d] was reached: %v", r.threshold, first) - r.queue = r.queue[1:] -} - -// Check it is reporter.Backend. -var _ reporter.Backend = &Reporter{} diff --git a/internal/pkg/reporter/fleet/reporter_test.go b/internal/pkg/reporter/fleet/reporter_test.go deleted file mode 100644 index c5160168a98..00000000000 --- a/internal/pkg/reporter/fleet/reporter_test.go +++ /dev/null @@ -1,241 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. 
- -package fleet - -import ( - "context" - "testing" - "time" - - "github.com/elastic/elastic-agent/internal/pkg/fleetapi" - "github.com/elastic/elastic-agent/internal/pkg/reporter" - "github.com/elastic/elastic-agent/pkg/core/logger" -) - -func TestEventsHaveAgentID(t *testing.T) { - // setup client - threshold := 10 - r := newTestReporter(1*time.Second, threshold) - - // report events - firstBatchSize := 5 - ee := getEvents(firstBatchSize) - for _, e := range ee { - r.Report(context.Background(), e) - } - - // check after delay for output - reportedEvents, _ := r.Events() - if reportedCount := len(reportedEvents); reportedCount != firstBatchSize { - t.Fatalf("expected %v events got %v", firstBatchSize, reportedCount) - } - - for _, e := range reportedEvents { - re, ok := e.(*event) - - if !ok { - t.Fatal("reported event is not an event") - } - - if re.AgentID != "agentID" { - t.Fatalf("reported event id incorrect, expected: 'agentID', got: '%v'", re.AgentID) - } - } - -} - -func TestReporting(t *testing.T) { - // setup client - threshold := 10 - r := newTestReporter(1*time.Second, threshold) - - // report events - firstBatchSize := 5 - ee := getEvents(firstBatchSize) - for _, e := range ee { - r.Report(context.Background(), e) - } - - // check after delay for output - reportedEvents, ack := r.Events() - if reportedCount := len(reportedEvents); reportedCount != firstBatchSize { - t.Fatalf("expected %v events got %v", firstBatchSize, reportedCount) - } - - // reset reported events - ack() - - // report events > threshold - secondBatchSize := threshold + 1 - ee = getEvents(secondBatchSize) - for _, e := range ee { - r.Report(context.Background(), e) - } - - // check events are dropped - reportedEvents, _ = r.Events() - if reportedCount := len(reportedEvents); reportedCount != threshold { - t.Fatalf("expected %v events got %v", secondBatchSize, reportedCount) - } -} - -func TestInfoDrop(t *testing.T) { - // setup client - threshold := 2 - r := newTestReporter(2*time.Second, threshold) - - // report 1 info and 1 error - ee := []reporter.Event{testStateEvent{}, testErrorEvent{}, testErrorEvent{}} - - for _, e := range ee { - r.Report(context.Background(), e) - } - - // check after delay for output - reportedEvents, _ := r.Events() - if reportedCount := len(reportedEvents); reportedCount != 2 { - t.Fatalf("expected %v events got %v", 2, reportedCount) - } - - // check both are errors - if reportedEvents[0].Type() != reportedEvents[1].Type() || reportedEvents[0].Type() != reporter.EventTypeError { - t.Fatalf("expected ERROR events got [1]: '%v', [2]: '%v'", reportedEvents[0].Type(), reportedEvents[1].Type()) - } -} - -func TestOutOfOrderAck(t *testing.T) { - // setup client - threshold := 100 - r := newTestReporter(1*time.Second, threshold) - - // report events - firstBatchSize := 5 - ee := getEvents(firstBatchSize) - for _, e := range ee { - r.Report(context.Background(), e) - } - - // check after delay for output - reportedEvents1, ack1 := r.Events() - if reportedCount := len(reportedEvents1); reportedCount != firstBatchSize { - t.Fatalf("expected %v events got %v", firstBatchSize, reportedCount) - } - - // report events > threshold - secondBatchSize := threshold + 1 - ee = getEvents(secondBatchSize) - for _, e := range ee { - r.Report(context.Background(), e) - } - - // check all events are returned - reportedEvents2, ack2 := r.Events() - if reportedCount := len(reportedEvents2); reportedCount == firstBatchSize+secondBatchSize { - t.Fatalf("expected %v events got %v", 
secondBatchSize, reportedCount) - } - - // ack second batch - ack2() - - reportedEvents, _ := r.Events() - if reportedCount := len(reportedEvents); reportedCount != 0 { - t.Fatalf("expected all events are removed after second batch ack, got %v events", reportedCount) - } - - defer func() { - r := recover() - if r != nil { - t.Fatalf("expected ack is ignored but it paniced: %v", r) - } - }() - - ack1() - reportedEvents, _ = r.Events() - if reportedCount := len(reportedEvents); reportedCount != 0 { - t.Fatalf("expected all events are still removed after first batch ack, got %v events", reportedCount) - } -} - -func TestAfterDrop(t *testing.T) { - // setup client - threshold := 7 - r := newTestReporter(1*time.Second, threshold) - - // report events - firstBatchSize := 5 - ee := getEvents(firstBatchSize) - for _, e := range ee { - r.Report(context.Background(), e) - } - - // check after delay for output - reportedEvents1, ack1 := r.Events() - if reportedCount := len(reportedEvents1); reportedCount != firstBatchSize { - t.Fatalf("expected %v events got %v", firstBatchSize, reportedCount) - } - - // report events > threshold - secondBatchSize := 5 - ee = getEvents(secondBatchSize) - for _, e := range ee { - r.Report(context.Background(), e) - } - - // check all events are returned - reportedEvents2, _ := r.Events() - if reportedCount := len(reportedEvents2); reportedCount != threshold { - t.Fatalf("expected %v events got %v", secondBatchSize, reportedCount) - } - - // remove first batch from queue - ack1() - - reportedEvents, _ := r.Events() - if reportedCount := len(reportedEvents); reportedCount != secondBatchSize { - t.Fatalf("expected all events from first batch are removed, got %v events", reportedCount) - } - -} - -func getEvents(count int) []reporter.Event { - ee := make([]reporter.Event, 0, count) - for i := 0; i < count; i++ { - ee = append(ee, testStateEvent{}) - } - - return ee -} - -func newTestReporter(frequency time.Duration, threshold int) *Reporter { - log, _ := logger.New("", false) - r := &Reporter{ - info: &testInfo{}, - queue: make([]fleetapi.SerializableEvent, 0), - logger: log, - threshold: threshold, - } - - return r -} - -type testInfo struct{} - -func (*testInfo) AgentID() string { return "agentID" } - -type testStateEvent struct{} - -func (testStateEvent) Type() string { return reporter.EventTypeState } -func (testStateEvent) SubType() string { return reporter.EventSubTypeInProgress } -func (testStateEvent) Time() time.Time { return time.Unix(0, 1) } -func (testStateEvent) Message() string { return "hello" } -func (testStateEvent) Payload() map[string]interface{} { return map[string]interface{}{"key": 1} } - -type testErrorEvent struct{} - -func (testErrorEvent) Type() string { return reporter.EventTypeError } -func (testErrorEvent) SubType() string { return "PATH" } -func (testErrorEvent) Time() time.Time { return time.Unix(0, 1) } -func (testErrorEvent) Message() string { return "hello" } -func (testErrorEvent) Payload() map[string]interface{} { return map[string]interface{}{"key": 1} } From 458c8aef4b9433c60856f34b3b169162c58d0a41 Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Mon, 12 Sep 2022 01:34:30 -0400 Subject: [PATCH 111/180] [Automation] Update elastic stack version to 8.5.0-589a4a10 for testing (#1147) Co-authored-by: apmmachine --- testing/environments/snapshot.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/testing/environments/snapshot.yml b/testing/environments/snapshot.yml index 
e3e0ef97547..1df05164579 100644 --- a/testing/environments/snapshot.yml +++ b/testing/environments/snapshot.yml @@ -3,7 +3,7 @@ version: '2.3' services: elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:8.5.0-4140365c-SNAPSHOT + image: docker.elastic.co/elasticsearch/elasticsearch:8.5.0-589a4a10-SNAPSHOT # When extend is used it merges healthcheck.tests, see: # https://github.com/docker/compose/issues/8962 # healthcheck: @@ -42,7 +42,7 @@ services: - ./docker/logstash/pki:/etc/pki:ro kibana: - image: docker.elastic.co/kibana/kibana:8.5.0-4140365c-SNAPSHOT + image: docker.elastic.co/kibana/kibana:8.5.0-589a4a10-SNAPSHOT environment: - "ELASTICSEARCH_USERNAME=kibana_system_user" - "ELASTICSEARCH_PASSWORD=testing" From d811ef3195f7c5bf1d1e61f6b6c02504d3da4595 Mon Sep 17 00:00:00 2001 From: Michal Pristas Date: Mon, 12 Sep 2022 15:08:28 +0200 Subject: [PATCH 112/180] Avoid reporting `Unhealthy` on fleet connectivity issues (#1152) Avoid reporting `Unhealthy` on fleet connectivity issues (#1152) --- .../agent/application/gateway/fleet/fleet_gateway.go | 10 ++++------ .../application/gateway/fleet/fleet_gateway_test.go | 2 -- 2 files changed, 4 insertions(+), 8 deletions(-) diff --git a/internal/pkg/agent/application/gateway/fleet/fleet_gateway.go b/internal/pkg/agent/application/gateway/fleet/fleet_gateway.go index 3cce7073e3a..f6ff9b504f5 100644 --- a/internal/pkg/agent/application/gateway/fleet/fleet_gateway.go +++ b/internal/pkg/agent/application/gateway/fleet/fleet_gateway.go @@ -291,15 +291,13 @@ func (f *fleetGateway) doExecute() (*fleetapi.CheckinResponse, error) { ) f.log.Error(err) - f.statusReporter.Update(state.Failed, err.Error(), nil) return nil, err } if f.checkinFailCounter > 1 { - // Update status reporter for gateway to degraded when there are two consecutive failures. - // Note that this may not propagate to fleet-server as the agent is having issues checking in. - // It may also (falsely) report a degraded session for 30s if it is eventually successful. - // However this component will allow the agent to report fleet gateway degredation locally. - f.statusReporter.Update(state.Degraded, fmt.Sprintf("checkin failed: %v", err), nil) + // do not update the status reporter with the failure: it would keep + // showing a connection failure for a period after the first successful + // connection, leaving a stale result and causing slight confusion.
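+ // the failed checkin is still logged at error level below and retried on the next iteration, so the failure remains visible in local logs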
+ f.log.Errorf("checking number %d failed: %s", f.checkinFailCounter, err.Error()) } continue } diff --git a/internal/pkg/agent/application/gateway/fleet/fleet_gateway_test.go b/internal/pkg/agent/application/gateway/fleet/fleet_gateway_test.go index b02507e61ad..99cb0630385 100644 --- a/internal/pkg/agent/application/gateway/fleet/fleet_gateway_test.go +++ b/internal/pkg/agent/application/gateway/fleet/fleet_gateway_test.go @@ -25,7 +25,6 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/agent/application/paths" "github.com/elastic/elastic-agent/internal/pkg/agent/storage" "github.com/elastic/elastic-agent/internal/pkg/agent/storage/store" - "github.com/elastic/elastic-agent/internal/pkg/core/state" "github.com/elastic/elastic-agent/internal/pkg/fleetapi" noopacker "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker/noop" "github.com/elastic/elastic-agent/internal/pkg/scheduler" @@ -705,7 +704,6 @@ func TestRetriesOnFailures(t *testing.T) { queue.On("Actions").Return([]fleetapi.Action{}) fleetReporter := &testutils.MockReporter{} - fleetReporter.On("Update", state.Degraded, mock.Anything, mock.Anything).Times(2) fleetReporter.On("Update", mock.Anything, mock.Anything, mock.Anything).Maybe() fleetReporter.On("Unregister").Maybe() From c8c313d470434cbb76db7ea753c937d42c5dca21 Mon Sep 17 00:00:00 2001 From: Victor Martinez Date: Mon, 12 Sep 2022 16:53:50 +0100 Subject: [PATCH 113/180] ci: enable MacOS M1 stages (#1123) --- .ci/Jenkinsfile | 71 +++++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 66 insertions(+), 5 deletions(-) diff --git a/.ci/Jenkinsfile b/.ci/Jenkinsfile index ccca5538652..b98093faba0 100644 --- a/.ci/Jenkinsfile +++ b/.ci/Jenkinsfile @@ -43,6 +43,10 @@ pipeline { // disabled by default, but required for merge: // opt-in with 'ci:extended-windows' tag on PR booleanParam(name: 'extended_windows_ci', defaultValue: false, description: 'Enable Extended Windows tests') + + // disabled by default, but required for merge: + // opt-in with 'ci:extended-m1' tag on PR + booleanParam(name: 'extended_m1_ci', defaultValue: false, description: 'Enable M1 tests') } stages { stage('Checkout') { @@ -56,6 +60,7 @@ pipeline { setEnvVar('PACKAGING_CHANGES', isGitRegionMatch(patterns: [ '(^dev-tools/packaging/.*|.ci/Jenkinsfile)' ], shouldMatchAll: false).toString()) setEnvVar('K8S_CHANGES', isGitRegionMatch(patterns: [ '(^deploy/kubernetes/.*|^version/docs/version.asciidoc|.ci/Jenkinsfile)' ], shouldMatchAll: false).toString()) setEnvVar('EXT_WINDOWS_CHANGES', isGitRegionMatch(patterns: [ '.ci/Jenkinsfile' ], shouldMatchAll: false).toString()) + setEnvVar('EXT_M1_CHANGES', isGitRegionMatch(patterns: [ '.ci/Jenkinsfile' ], shouldMatchAll: false).toString()) } } } @@ -245,10 +250,10 @@ pipeline { } stage('Sync K8s') { //This stage opens a PR to kibana Repository in order to sync k8s manifests when { - // Only on main branch + // Only on main branch // Enable if k8s related changes. allOf { - branch 'main' // Only runs for branch main + branch 'main' // Only runs for branch main expression { return env.K8S_CHANGES == "true" } // If k8s changes } } @@ -267,11 +272,11 @@ pipeline { ./creator_k8s_manifest.sh . 
""") sh(label: '[Clone] Kibana-Repository', script: """ make ci-clone-kibana-repository - cp Makefile ./kibana + cp Makefile ./kibana cd kibana make ci-create-kubernetes-templates-pull-request """) } - } + } } post { always { @@ -364,6 +369,55 @@ pipeline { } } } + stage('m1') { + agent { label 'orka && darwin && aarch64' } + options { skipDefaultCheckout() } + when { + // Always when running builds on branches/tags + // Enable if extended M1 support related changes. + beforeAgent true + anyOf { + not { changeRequest() } + expression { return isExtendedM1Enabled() && env.ONLY_DOCS == "false"} + } + } + stages { + stage('build'){ + steps { + withGithubNotify(context: "Build-darwin-aarch64") { + deleteDir() + unstashV2(name: 'source', bucket: "${JOB_GCS_BUCKET}", credentialsId: "${JOB_GCS_CREDENTIALS}") + withMageEnv(){ + dir("${BASE_DIR}"){ + cmd(label: 'Go build', script: 'mage build') + } + } + } + } + } + stage('Test') { + steps { + withGithubNotify(context: "Test-darwin-aarch64") { + withMageEnv(){ + dir("${BASE_DIR}"){ + withEnv(["TEST_COVERAGE=${isCodeCoverageEnabled()}"]) { + cmd(label: 'Go unitTest', script: 'mage unitTest') + } + } + } + } + } + post { + always { + junit(allowEmptyResults: true, keepLongStdio: true, testResults: "${BASE_DIR}/build/TEST-*.xml") + whenTrue(isCodeCoverageEnabled()) { + coverageReport(baseDir: "**/build", reportFiles: 'TEST-go-unit.html', coverageFiles: 'TEST-go-unit-cov.xml') + } + } + } + } + } + } } post { cleanup { @@ -483,4 +537,11 @@ def isPackageEnabled() { */ def isExtendedWindowsEnabled() { return env.EXT_WINDOWS_CHANGES == "true" || params.extended_windows_ci || env.GITHUB_COMMENT?.contains('extended windows') || matchesPrLabel(label: 'ci:extended-windows') -} \ No newline at end of file +} + +/** +* Wrapper to know if the build should enable the M1 extended support +*/ +def isExtendedM1Enabled() { + return env.EXT_M1_CHANGES == "true" || params.extended_m1_ci || env.GITHUB_COMMENT?.contains('extended m1') || matchesPrLabel(label: 'ci:extended-m1') +} From 8a00e801bad3a7662a8add5a4106f5f5eec87036 Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Mon, 12 Sep 2022 16:56:54 -0400 Subject: [PATCH 114/180] [Automation] Update go release version to 1.18.6 (#1143) --- .go-version | 2 +- Dockerfile | 2 +- version/docs/version.asciidoc | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.go-version b/.go-version index 8e8b0a9335a..04a8bc26d16 100644 --- a/.go-version +++ b/.go-version @@ -1 +1 @@ -1.18.5 +1.18.6 diff --git a/Dockerfile b/Dockerfile index a4f9b4d338f..78bc8928198 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -ARG GO_VERSION=1.18.5 +ARG GO_VERSION=1.18.6 FROM circleci/golang:${GO_VERSION} diff --git a/version/docs/version.asciidoc b/version/docs/version.asciidoc index 4a1ae4fd6ea..db48ba622f8 100644 --- a/version/docs/version.asciidoc +++ b/version/docs/version.asciidoc @@ -1,6 +1,6 @@ :stack-version: 8.3.0 :doc-branch: main -:go-version: 1.18.5 +:go-version: 1.18.6 :release-state: unreleased :python: 3.7 :docker: 1.12 From 27300491a48ec1ea7d72dc6254ec1f03c1448ea6 Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Tue, 13 Sep 2022 01:36:49 -0400 Subject: [PATCH 115/180] [Automation] Update elastic stack version to 8.5.0-37418cf3 for testing (#1165) Co-authored-by: apmmachine --- testing/environments/snapshot.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/testing/environments/snapshot.yml 
b/testing/environments/snapshot.yml index 1df05164579..22b50aa43bc 100644 --- a/testing/environments/snapshot.yml +++ b/testing/environments/snapshot.yml @@ -3,7 +3,7 @@ version: '2.3' services: elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:8.5.0-589a4a10-SNAPSHOT + image: docker.elastic.co/elasticsearch/elasticsearch:8.5.0-37418cf3-SNAPSHOT # When extend is used it merges healthcheck.tests, see: # https://github.com/docker/compose/issues/8962 # healthcheck: @@ -42,7 +42,7 @@ services: - ./docker/logstash/pki:/etc/pki:ro kibana: - image: docker.elastic.co/kibana/kibana:8.5.0-589a4a10-SNAPSHOT + image: docker.elastic.co/kibana/kibana:8.5.0-37418cf3-SNAPSHOT environment: - "ELASTICSEARCH_USERNAME=kibana_system_user" - "ELASTICSEARCH_PASSWORD=testing" From 9b174738534b2a1e83726b3dac699fcf2b448631 Mon Sep 17 00:00:00 2001 From: Tiago Queiroz Date: Tue, 13 Sep 2022 12:46:36 +0200 Subject: [PATCH 116/180] Remove mage notice in favour of make notice (#1108) The current implementation of mage notice is not working because it was never finalised, and the fact that both it and `make notice` exist only generates confusion. This commit removes `mage notice` and documents that `make notice` should be used instead for the time being. In the long run we want to use the implementation in `elastic-agent-libs`, however it is not working at the moment. Closes #1107 Co-authored-by: Craig MacKenzie --- README.md | 17 +++++++++++++++++ magefile.go | 44 -------------------------------------------- 2 files changed, 17 insertions(+), 44 deletions(-) diff --git a/README.md b/README.md index faecad7b707..b1f581b38bb 100644 --- a/README.md +++ b/README.md @@ -103,3 +103,20 @@ kubectl apply -f elastic-agent-${ELASTIC_AGENT_MODE}-kubernetes.yaml ``` kubectl -n kube-system get pods -l app=elastic-agent ``` + +## Updating dependencies/PRs +Even though we prefer `mage` for our automation, we still have some +rules implemented in our `Makefile`, and CI will use the +`Makefile`. CI will run `make check-ci`, so make sure to run it +locally before submitting any PRs to get quicker feedback instead +of waiting for a CI failure. + +### Generating the `NOTICE.txt` when updating/adding dependencies +To do so, just run `make notice`; this is also part of `make +check-ci` and is the same check our CI will do. + +At some point we will migrate it to mage (see discussion on +https://github.com/elastic/elastic-agent/pull/1108 and on +https://github.com/elastic/elastic-agent/issues/1107). However, until +we have the mage automation sorted out, it has been removed to avoid +confusion. diff --git a/magefile.go b/magefile.go index a215f1639d7..66b648ac1d8 100644 --- a/magefile.go +++ b/magefile.go @@ -10,14 +10,12 @@ package main import ( "context" "fmt" - "io" "os" "os/exec" "path/filepath" "runtime" "strconv" "strings" - "sync" "time" "github.com/hashicorp/go-multierror" @@ -92,48 +90,6 @@ type Demo mg.Namespace // Dev runs package and build for dev purposes. type Dev mg.Namespace -// Notice regenerates the NOTICE.txt file. 
-func Notice() error { - fmt.Println(">> Generating NOTICE") - fmt.Println(">> fmt - go mod tidy") - err := sh.RunV("go", "mod", "tidy", "-v") - if err != nil { - return errors.Wrap(err, "failed running go mod tidy, please fix the issues reported") - } - fmt.Println(">> fmt - go mod download") - err = sh.RunV("go", "mod", "download") - if err != nil { - return errors.Wrap(err, "failed running go mod download, please fix the issues reported") - } - fmt.Println(">> fmt - go list") - str, err := sh.Output("go", "list", "-m", "-json", "all") - if err != nil { - return errors.Wrap(err, "failed running go list, please fix the issues reported") - } - fmt.Println(">> fmt - go run") - cmd := exec.Command("go", "run", "go.elastic.co/go-licence-detector", "-includeIndirect", "-rules", "dev-tools/notice/rules.json", "-overrides", "dev-tools/notice/overrides.json", "-noticeTemplate", "dev-tools/notice/NOTICE.txt.tmpl", - "-noticeOut", "NOTICE.txt", "-depsOut", "\"\"") - stdin, err := cmd.StdinPipe() - if err != nil { - return errors.Wrap(err, "failed running go run, please fix the issues reported") - } - var wg sync.WaitGroup - wg.Add(1) - go func() { - defer stdin.Close() - defer wg.Done() - if _, err := io.WriteString(stdin, str); err != nil { - fmt.Println(err) - } - }() - wg.Wait() - _, err = cmd.CombinedOutput() - if err != nil { - return errors.Wrap(err, "failed combined output, please fix the issues reported") - } - return nil -} - func CheckNoChanges() error { fmt.Println(">> fmt - go run") err := sh.RunV("go", "mod", "tidy", "-v") From 6fb3a93a313e7fa32e7fc5cf91b2ff7893fef893 Mon Sep 17 00:00:00 2001 From: Victor Martinez Date: Tue, 13 Sep 2022 12:17:39 +0100 Subject: [PATCH 117/180] ci: run e2e-testing at the end (#1169) --- .ci/Jenkinsfile | 45 ++++++++++++++++++++++----------------------- 1 file changed, 22 insertions(+), 23 deletions(-) diff --git a/.ci/Jenkinsfile b/.ci/Jenkinsfile index b98093faba0..71ac9188b13 100644 --- a/.ci/Jenkinsfile +++ b/.ci/Jenkinsfile @@ -286,29 +286,6 @@ pipeline { } } } - stage('e2e tests') { - when { - // Always when running builds on branches/tags - // Enable if e2e related changes. - beforeAgent true - anyOf { - not { changeRequest() } - // package artifacts are not generated if ONLY_DOCS, therefore e2e should not run if ONLY_DOCS - expression { return isE2eEnabled() && env.ONLY_DOCS == "false"} - } - } - steps { - // TODO: what's the testMatrixFile to be used if any - runE2E(testMatrixFile: '.ci/.e2e-tests-for-elastic-agent.yaml', - beatVersion: "${env.BEAT_VERSION}-SNAPSHOT", - elasticAgentVersion: "${env.BEAT_VERSION}-SNAPSHOT", - gitHubCheckName: "e2e-tests", - gitHubCheckRepo: env.REPO, - gitHubCheckSha1: env.GIT_BASE_COMMIT, - propagate: true, - wait: true) - } - } stage('extended windows') { when { // Always when running builds on branches/tags @@ -418,6 +395,28 @@ pipeline { } } } + stage('e2e tests') { + when { + // Always when running builds on branches/tags + // Enable if e2e related changes. 
+ beforeAgent true + anyOf { + not { changeRequest() } + // package artifacts are not generated if ONLY_DOCS, therefore e2e should not run if ONLY_DOCS + expression { return isE2eEnabled() && env.ONLY_DOCS == "false"} + } + } + steps { + runE2E(testMatrixFile: '.ci/.e2e-tests-for-elastic-agent.yaml', + beatVersion: "${env.BEAT_VERSION}-SNAPSHOT", + elasticAgentVersion: "${env.BEAT_VERSION}-SNAPSHOT", + gitHubCheckName: "e2e-tests", + gitHubCheckRepo: env.REPO, + gitHubCheckSha1: env.GIT_BASE_COMMIT, + propagate: true, + wait: true) + } + } } post { cleanup { From 3fb21eafd0bdf539bf5316f28f9b3c3b5c79574d Mon Sep 17 00:00:00 2001 From: Victor Martinez Date: Tue, 13 Sep 2022 18:32:00 +0100 Subject: [PATCH 118/180] ci: move macos to github actions (#1175) --- .ci/Jenkinsfile | 3 ++- .github/workflows/macos.yml | 25 +++++++++++++++++++++++++ 2 files changed, 27 insertions(+), 1 deletion(-) create mode 100644 .github/workflows/macos.yml diff --git a/.ci/Jenkinsfile b/.ci/Jenkinsfile index 71ac9188b13..e1c0de3a2c6 100644 --- a/.ci/Jenkinsfile +++ b/.ci/Jenkinsfile @@ -89,7 +89,8 @@ pipeline { axes { axis { name 'PLATFORM' - values 'ubuntu-20.04 && immutable', 'aws && aarch64', 'windows-2016 && windows-immutable', 'windows-2022 && windows-immutable', 'macos12 && x86_64' + // Orka workers are not healthy (memory and connectivity issues) + values 'ubuntu-20.04 && immutable', 'aws && aarch64', 'windows-2016 && windows-immutable', 'windows-2022 && windows-immutable' //, 'macos12 && x86_64' } } stages { diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml new file mode 100644 index 00000000000..bf3e5eed775 --- /dev/null +++ b/.github/workflows/macos.yml @@ -0,0 +1,25 @@ +name: macos + +on: + pull_request: + push: + branches: + - main + - 8.* + +jobs: + macos: + runs-on: macos-latest + steps: + - uses: actions/checkout@v3 + - name: Fetch Go version from .go-version + run: echo "GO_VERSION=$(cat .go-version)" >> $GITHUB_ENV + - uses: actions/setup-go@v3 + with: + go-version: ${{ env.GO_VERSION }} + - name: Install dependencies + run: go install github.com/magefile/mage + - name: Run build + run: mage build + - name: Run test + run: mage unitTest From eb3a698f578aacddade0a968f4fcf4d19202ebb1 Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Wed, 14 Sep 2022 01:41:55 -0400 Subject: [PATCH 119/180] [Automation] Update elastic stack version to 8.5.0-fcf3d4c2 for testing (#1183) Co-authored-by: apmmachine --- testing/environments/snapshot.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/testing/environments/snapshot.yml b/testing/environments/snapshot.yml index 22b50aa43bc..3c8cb77b9bd 100644 --- a/testing/environments/snapshot.yml +++ b/testing/environments/snapshot.yml @@ -3,7 +3,7 @@ version: '2.3' services: elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:8.5.0-37418cf3-SNAPSHOT + image: docker.elastic.co/elasticsearch/elasticsearch:8.5.0-fcf3d4c2-SNAPSHOT # When extend is used it merges healthcheck.tests, see: # https://github.com/docker/compose/issues/8962 # healthcheck: @@ -42,7 +42,7 @@ services: - ./docker/logstash/pki:/etc/pki:ro kibana: - image: docker.elastic.co/kibana/kibana:8.5.0-37418cf3-SNAPSHOT + image: docker.elastic.co/kibana/kibana:8.5.0-fcf3d4c2-SNAPSHOT environment: - "ELASTICSEARCH_USERNAME=kibana_system_user" - "ELASTICSEARCH_PASSWORD=testing" From 59f9ac46ec61208005dc2b9806e2fbe10af36c40 Mon Sep 17 00:00:00 2001 From: Chris Mark Date: Wed, 14 Sep 2022 11:18:06 
+0300 Subject: [PATCH 120/180] Add support for hints' based autodiscovery in kubernetes provider (#698) --- CHANGELOG.next.asciidoc | 1 + NOTICE.txt | 4 +- go.mod | 3 +- go.sum | 13 +- internal/pkg/agent/application/local_mode.go | 2 +- .../pkg/agent/application/managed_mode.go | 2 +- .../agent/application/managed_mode_test.go | 2 +- internal/pkg/agent/cmd/inspect.go | 2 +- internal/pkg/agent/install/uninstall.go | 2 +- internal/pkg/composable/context.go | 6 +- internal/pkg/composable/controller.go | 6 +- internal/pkg/composable/controller_test.go | 8 +- internal/pkg/composable/dynamic.go | 25 +- .../pkg/composable/providers/agent/agent.go | 4 +- .../composable/providers/agent/agent_test.go | 2 +- .../pkg/composable/providers/docker/docker.go | 2 +- internal/pkg/composable/providers/env/env.go | 4 +- .../pkg/composable/providers/env/env_test.go | 2 +- .../pkg/composable/providers/host/host.go | 4 +- .../composable/providers/host/host_test.go | 4 +- .../composable/providers/kubernetes/config.go | 6 + .../composable/providers/kubernetes/hints.go | 258 +++++++++++++++ .../providers/kubernetes/hints_test.go | 301 ++++++++++++++++++ .../providers/kubernetes/kubernetes.go | 23 +- .../composable/providers/kubernetes/node.go | 3 +- .../providers/kubernetes/node_test.go | 5 - .../composable/providers/kubernetes/pod.go | 67 +++- .../providers/kubernetes/pod_test.go | 44 ++- .../providers/kubernetes/service.go | 3 +- .../providers/kubernetes/service_test.go | 5 - .../kubernetes_leaderelection.go | 2 +- .../kubernetessecrets/kubernetes_secrets.go | 2 +- .../kubernetes_secrets_test.go | 4 +- .../pkg/composable/providers/local/local.go | 4 +- .../composable/providers/local/local_test.go | 2 +- .../providers/localdynamic/localdynamic.go | 4 +- .../localdynamic/localdynamic_test.go | 2 +- .../pkg/composable/providers/path/path.go | 4 +- .../composable/providers/path/path_test.go | 2 +- internal/pkg/composable/testing/dynamic.go | 2 + 40 files changed, 745 insertions(+), 96 deletions(-) create mode 100644 internal/pkg/composable/providers/kubernetes/hints.go create mode 100644 internal/pkg/composable/providers/kubernetes/hints_test.go diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 48109ba3c0d..6047c71b3d7 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -201,3 +201,4 @@ - Redact sensitive information on diagnostics collect command. {issue}[241] {pull}[566] - Fix incorrectly creating a filebeat redis input when a policy contains a packetbeat redis input. {issue}[427] {pull}[700] - Add `lumberjack` input type to the Filebeat spec. {pull}[959] +- Add support for hints' based autodiscovery in kubernetes provider. {pull}[698] diff --git a/NOTICE.txt b/NOTICE.txt index 7b0395cd0e0..79a332f3d45 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -617,11 +617,11 @@ you may not use this file except in compliance with the Elastic License. 
-------------------------------------------------------------------------------- Dependency : github.com/elastic/elastic-agent-autodiscover -Version: v0.0.0-20220404145827-89887023c1ab +Version: v0.2.1 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/elastic/elastic-agent-autodiscover@v0.0.0-20220404145827-89887023c1ab/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/elastic/elastic-agent-autodiscover@v0.2.1/LICENSE: Apache License Version 2.0, January 2004 diff --git a/go.mod b/go.mod index 9f0dfb21464..2557e2109d4 100644 --- a/go.mod +++ b/go.mod @@ -11,7 +11,7 @@ require ( github.com/coreos/go-systemd/v22 v22.3.2 github.com/docker/go-units v0.4.0 github.com/elastic/e2e-testing v1.99.2-0.20220117192005-d3365c99b9c4 - github.com/elastic/elastic-agent-autodiscover v0.0.0-20220404145827-89887023c1ab + github.com/elastic/elastic-agent-autodiscover v0.2.1 github.com/elastic/elastic-agent-client/v7 v7.0.0-20210727140539-f0905d9377f6 github.com/elastic/elastic-agent-libs v0.2.6 github.com/elastic/elastic-agent-system-metrics v0.3.0 @@ -119,7 +119,6 @@ require ( go.elastic.co/apm/v2 v2.0.0 // indirect go.elastic.co/fastjson v1.1.0 // indirect go.uber.org/atomic v1.9.0 // indirect - go.uber.org/goleak v1.1.12 // indirect go.uber.org/multierr v1.8.0 // indirect golang.org/x/mod v0.5.1 // indirect golang.org/x/net v0.0.0-20220225172249-27dd8689420f // indirect diff --git a/go.sum b/go.sum index 22059e4c0b3..0728fa89909 100644 --- a/go.sum +++ b/go.sum @@ -92,6 +92,7 @@ github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2 github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00= github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600= github.com/Microsoft/hcsshim v0.8.21/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4= +github.com/Microsoft/hcsshim v0.8.24/go.mod h1:4zegtUJth7lAvFyc6cH2gGQ5B3OFQim01nnU2M8jKDg= github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU= github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= @@ -227,6 +228,7 @@ github.com/containerd/cgroups v0.0.0-20200710171044-318312a37340/go.mod h1:s5q4S github.com/containerd/cgroups v0.0.0-20200824123100-0b889c03f102/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo= github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE= github.com/containerd/cgroups v1.0.1/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f25ZfBiPYRkU= +github.com/containerd/cgroups v1.0.3/go.mod h1:/ofk34relqNjSGyqPrmEULrO4Sc8LJhvJmWbUCUKqj8= github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE= 
@@ -240,6 +242,7 @@ github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMX github.com/containerd/containerd v1.4.0-beta.2.0.20200729163537-40b22ef07410/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/containerd v1.4.1/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/containerd v1.4.3/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.4.9/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/containerd v1.5.0-beta.1/go.mod h1:5HfvG1V2FsKesEGQ17k5/T7V960Tmcumvqn8Mc+pCYQ= github.com/containerd/containerd v1.5.0-beta.3/go.mod h1:/wr9AVtEM7x9c+n0+stptlo/uBBoBORwEx6ardVcmKU= github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09ZvgqEq8EfBp/m3lcVZIvPHhI= @@ -281,6 +284,7 @@ github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDG github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8= github.com/containerd/ttrpc v1.0.1/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y= github.com/containerd/ttrpc v1.0.2/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y= +github.com/containerd/ttrpc v1.1.0/go.mod h1:XX4ZTnoOId4HklF4edwc4DcqskFZuvXB1Evzy5KFQpQ= github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc= github.com/containerd/typeurl v0.0.0-20190911142611-5eb25027c9fd/go.mod h1:GeKYzf2pQcqv7tJ0AoCuuhtnqhva5LNU3U+OyKxxJpk= github.com/containerd/typeurl v1.0.1/go.mod h1:TB1hUtrpaiO88KEK56ijojHS1+NeF0izUACaJW2mdXg= @@ -375,11 +379,11 @@ github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:Htrtb github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/elastic/e2e-testing v1.99.2-0.20220117192005-d3365c99b9c4 h1:uYT+Krd8dsvnhnLK9pe/JHZkYtXEGPfbV4Wt1JPPol0= github.com/elastic/e2e-testing v1.99.2-0.20220117192005-d3365c99b9c4/go.mod h1:UcNuf4pX/qDVNQr0zybm1NL2YoWik+jKBaINZqQCA40= -github.com/elastic/elastic-agent-autodiscover v0.0.0-20220404145827-89887023c1ab h1:Jk6Mfk5BF8gtfE7X0bNCiDGBtwJVxRI79b4wLCAsP+A= -github.com/elastic/elastic-agent-autodiscover v0.0.0-20220404145827-89887023c1ab/go.mod h1:Gg1fsQI+rVms9FJ2DefBSojfPIzgkV8xlyG8fPG0DE8= +github.com/elastic/elastic-agent-autodiscover v0.2.1 h1:Nbeayh3vq2FNm6xaFo34mhUdOu0EVlpj53CqCsbU0E4= +github.com/elastic/elastic-agent-autodiscover v0.2.1/go.mod h1:gPnzzfdYNdgznAb+iG9eyyXaQXBbAMHa+Y6Z8hXfcGY= github.com/elastic/elastic-agent-client/v7 v7.0.0-20210727140539-f0905d9377f6 h1:nFvXHBjYK3e9+xF0WKDeAKK4aOO51uC28s+L9rBmilo= github.com/elastic/elastic-agent-client/v7 v7.0.0-20210727140539-f0905d9377f6/go.mod h1:uh/Gj9a0XEbYoM4NYz4LvaBVARz3QXLmlNjsrKY9fTc= -github.com/elastic/elastic-agent-libs v0.0.0-20220303160015-5b4e674da3dd/go.mod h1://82M1l73IHx0wDbS2Tzkq6Fx9fkmytS1KgkIyzvNTM= +github.com/elastic/elastic-agent-libs v0.2.5/go.mod h1:chO3rtcLyGlKi9S0iGVZhYCzDfdDsAQYBc+ui588AFE= github.com/elastic/elastic-agent-libs v0.2.6 h1:DpcUcCVYZ7lNtHLUlyT1u/GtGAh49wpL15DTH7+8O5o= github.com/elastic/elastic-agent-libs v0.2.6/go.mod h1:chO3rtcLyGlKi9S0iGVZhYCzDfdDsAQYBc+ui588AFE= github.com/elastic/elastic-agent-system-metrics v0.3.0 
h1:W8L0E8lWJmdguH+oIR7OzuFgopvw8ucZAE9w6iqVlpE= @@ -390,6 +394,7 @@ github.com/elastic/go-elasticsearch/v8 v8.0.0-20210317102009-a9d74cec0186/go.mod github.com/elastic/go-licenser v0.3.1/go.mod h1:D8eNQk70FOCVBl3smCGQt/lv7meBeQno2eI1S5apiHQ= github.com/elastic/go-licenser v0.4.0 h1:jLq6A5SilDS/Iz1ABRkO6BHy91B9jBora8FwGRsDqUI= github.com/elastic/go-licenser v0.4.0/go.mod h1:V56wHMpmdURfibNBggaSBfqgPxyT1Tldns1i87iTEvU= +github.com/elastic/go-structform v0.0.9/go.mod h1:CZWf9aIRYY5SuKSmOhtXScE5uQiLZNqAFnwKR4OrIM4= github.com/elastic/go-structform v0.0.10 h1:oy08o/Ih2hHTkNcRY/1HhaYvIp5z6t8si8gnCJPDo1w= github.com/elastic/go-structform v0.0.10/go.mod h1:CZWf9aIRYY5SuKSmOhtXScE5uQiLZNqAFnwKR4OrIM4= github.com/elastic/go-sysinfo v1.1.1/go.mod h1:i1ZYdU10oLNfRzq4vq62BEwD2fH8KaWh6eh0ikPT9F0= @@ -946,7 +951,6 @@ github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8 github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= -github.com/opencontainers/image-spec v1.0.2-0.20190823105129-775207bd45b6/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM= github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= @@ -1397,6 +1401,7 @@ golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211020060615-d418f374d309/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= diff --git a/internal/pkg/agent/application/local_mode.go b/internal/pkg/agent/application/local_mode.go index aae202b114f..ecd988bc7a8 100644 --- a/internal/pkg/agent/application/local_mode.go +++ b/internal/pkg/agent/application/local_mode.go @@ -114,7 +114,7 @@ func newLocal( } localApplication.router = router - composableCtrl, err := composable.New(log, rawConfig) + composableCtrl, err := composable.New(log, rawConfig, false) if err != nil { return nil, errors.New(err, "failed to initialize composable controller") } diff --git a/internal/pkg/agent/application/managed_mode.go b/internal/pkg/agent/application/managed_mode.go index 037cf74ad5c..a9903733762 100644 --- a/internal/pkg/agent/application/managed_mode.go +++ b/internal/pkg/agent/application/managed_mode.go @@ -135,7 +135,7 @@ func newManaged( } managedApplication.router = router - composableCtrl, err := composable.New(log, rawConfig) + composableCtrl, err := 
composable.New(log, rawConfig, true) if err != nil { return nil, errors.New(err, "failed to initialize composable controller") } diff --git a/internal/pkg/agent/application/managed_mode_test.go b/internal/pkg/agent/application/managed_mode_test.go index 7f111eae322..ebba108b7ec 100644 --- a/internal/pkg/agent/application/managed_mode_test.go +++ b/internal/pkg/agent/application/managed_mode_test.go @@ -45,7 +45,7 @@ func TestManagedModeRouting(t *testing.T) { router, _ := router.New(log, streamFn) agentInfo, _ := info.NewAgentInfo(true) nullStore := &storage.NullStore{} - composableCtrl, _ := composable.New(log, nil) + composableCtrl, _ := composable.New(log, nil, true) emit, err := emitter.New(ctx, log, agentInfo, composableCtrl, router, &pipeline.ConfigModifiers{Decorators: []pipeline.DecoratorFunc{modifiers.InjectMonitoring}}, nil) require.NoError(t, err) diff --git a/internal/pkg/agent/cmd/inspect.go b/internal/pkg/agent/cmd/inspect.go index e6284d56487..2faf951cd16 100644 --- a/internal/pkg/agent/cmd/inspect.go +++ b/internal/pkg/agent/cmd/inspect.go @@ -258,7 +258,7 @@ func getProgramsFromConfig(log *logger.Logger, agentInfo *info.AgentInfo, cfg *c router := &inmemRouter{} ctx, cancel := context.WithCancel(context.Background()) defer cancel() - composableCtrl, err := composable.New(log, cfg) + composableCtrl, err := composable.New(log, cfg, false) if err != nil { return nil, err } diff --git a/internal/pkg/agent/install/uninstall.go b/internal/pkg/agent/install/uninstall.go index 598ddaeea8c..c2881302678 100644 --- a/internal/pkg/agent/install/uninstall.go +++ b/internal/pkg/agent/install/uninstall.go @@ -250,7 +250,7 @@ func applyDynamics(ctx context.Context, log *logger.Logger, cfg *config.Config) }) } - ctrl, err := composable.New(log, cfg) + ctrl, err := composable.New(log, cfg, false) if err != nil { return nil, err } diff --git a/internal/pkg/composable/context.go b/internal/pkg/composable/context.go index 1dcb50cf956..d0ad4179e87 100644 --- a/internal/pkg/composable/context.go +++ b/internal/pkg/composable/context.go @@ -14,8 +14,9 @@ import ( ) // ContextProviderBuilder creates a new context provider based on the given config and returns it. -type ContextProviderBuilder func(log *logger.Logger, config *config.Config) (corecomp.ContextProvider, error) +type ContextProviderBuilder func(log *logger.Logger, config *config.Config, managed bool) (corecomp.ContextProvider, error) +//nolint:dupl,goimports,nolintlint // false positive // AddContextProvider adds a new ContextProviderBuilder func (r *providerRegistry) AddContextProvider(name string, builder ContextProviderBuilder) error { r.lock.Lock() @@ -24,11 +25,14 @@ func (r *providerRegistry) AddContextProvider(name string, builder ContextProvid if name == "" { return fmt.Errorf("provider name is required") } + if strings.ToLower(name) != name { return fmt.Errorf("provider name must be lowercase") } + _, contextExists := r.contextProviders[name] _, dynamicExists := r.dynamicProviders[name] + if contextExists || dynamicExists { return fmt.Errorf("provider '%s' is already registered", name) } diff --git a/internal/pkg/composable/controller.go b/internal/pkg/composable/controller.go index a14e111194f..d94b9cda7d7 100644 --- a/internal/pkg/composable/controller.go +++ b/internal/pkg/composable/controller.go @@ -40,7 +40,7 @@ type controller struct { } // New creates a new controller. 
-func New(log *logger.Logger, c *config.Config) (Controller, error) { +func New(log *logger.Logger, c *config.Config, managed bool) (Controller, error) { l := log.Named("composable") var providersCfg Config @@ -59,7 +59,7 @@ func New(log *logger.Logger, c *config.Config) (Controller, error) { // explicitly disabled; skipping continue } - provider, err := builder(l, pCfg) + provider, err := builder(l, pCfg, managed) if err != nil { return nil, errors.New(err, fmt.Sprintf("failed to build provider '%s'", name), errors.TypeConfig, errors.M("provider", name)) } @@ -76,7 +76,7 @@ func New(log *logger.Logger, c *config.Config) (Controller, error) { // explicitly disabled; skipping continue } - provider, err := builder(l.Named(strings.Join([]string{"providers", name}, ".")), pCfg) + provider, err := builder(l.Named(strings.Join([]string{"providers", name}, ".")), pCfg, managed) if err != nil { return nil, errors.New(err, fmt.Sprintf("failed to build provider '%s'", name), errors.TypeConfig, errors.M("provider", name)) } diff --git a/internal/pkg/composable/controller_test.go b/internal/pkg/composable/controller_test.go index 09780767928..2ba71f33243 100644 --- a/internal/pkg/composable/controller_test.go +++ b/internal/pkg/composable/controller_test.go @@ -77,7 +77,7 @@ func TestController(t *testing.T) { log, err := logger.New("", false) require.NoError(t, err) - c, err := composable.New(log, cfg) + c, err := composable.New(log, cfg, false) require.NoError(t, err) var wg sync.WaitGroup @@ -99,14 +99,14 @@ func TestController(t *testing.T) { _, envExists := setVars[0].Lookup("env") assert.False(t, envExists) local, _ := setVars[0].Lookup("local") - localMap := local.(map[string]interface{}) + localMap, _ := local.(map[string]interface{}) assert.Equal(t, "value1", localMap["key1"]) local, _ = setVars[1].Lookup("local_dynamic") - localMap = local.(map[string]interface{}) + localMap, _ = local.(map[string]interface{}) assert.Equal(t, "value1", localMap["key1"]) local, _ = setVars[2].Lookup("local_dynamic") - localMap = local.(map[string]interface{}) + localMap, _ = local.(map[string]interface{}) assert.Equal(t, "value2", localMap["key1"]) } diff --git a/internal/pkg/composable/dynamic.go b/internal/pkg/composable/dynamic.go index a0de3543a1c..b8e55249a4d 100644 --- a/internal/pkg/composable/dynamic.go +++ b/internal/pkg/composable/dynamic.go @@ -34,30 +34,31 @@ type DynamicProvider interface { } // DynamicProviderBuilder creates a new dynamic provider based on the given config and returns it. 
-type DynamicProviderBuilder func(log *logger.Logger, config *config.Config) (DynamicProvider, error) +type DynamicProviderBuilder func(log *logger.Logger, config *config.Config, managed bool) (DynamicProvider, error) +//nolint:dupl,goimports,nolintlint // false positive // AddDynamicProvider adds a new DynamicProviderBuilder -func (r *providerRegistry) AddDynamicProvider(name string, builder DynamicProviderBuilder) error { +func (r *providerRegistry) AddDynamicProvider(providerName string, builder DynamicProviderBuilder) error { r.lock.Lock() defer r.lock.Unlock() - if name == "" { - return fmt.Errorf("provider name is required") + if providerName == "" { + return fmt.Errorf("provider providerName is required") } - if strings.ToLower(name) != name { - return fmt.Errorf("provider name must be lowercase") + if strings.ToLower(providerName) != providerName { + return fmt.Errorf("provider providerName must be lowercase") } - _, contextExists := r.contextProviders[name] - _, dynamicExists := r.dynamicProviders[name] + _, contextExists := r.contextProviders[providerName] + _, dynamicExists := r.dynamicProviders[providerName] if contextExists || dynamicExists { - return fmt.Errorf("provider '%s' is already registered", name) + return fmt.Errorf("provider '%s' is already registered", providerName) } if builder == nil { - return fmt.Errorf("provider '%s' cannot be registered with a nil factory", name) + return fmt.Errorf("provider '%s' cannot be registered with a nil factory", providerName) } - r.dynamicProviders[name] = builder - r.logger.Debugf("Registered provider: %s", name) + r.dynamicProviders[providerName] = builder + r.logger.Debugf("Registered provider: %s", providerName) return nil } diff --git a/internal/pkg/composable/providers/agent/agent.go b/internal/pkg/composable/providers/agent/agent.go index 2b9d0ff3deb..5578dd84d28 100644 --- a/internal/pkg/composable/providers/agent/agent.go +++ b/internal/pkg/composable/providers/agent/agent.go @@ -15,7 +15,7 @@ import ( ) func init() { - composable.Providers.AddContextProvider("agent", ContextProviderBuilder) + _ = composable.Providers.AddContextProvider("agent", ContextProviderBuilder) } type contextProvider struct{} @@ -42,6 +42,6 @@ func (*contextProvider) Run(comm corecomp.ContextProviderComm) error { } // ContextProviderBuilder builds the context provider. 
-func ContextProviderBuilder(_ *logger.Logger, _ *config.Config) (corecomp.ContextProvider, error) { +func ContextProviderBuilder(_ *logger.Logger, _ *config.Config, managed bool) (corecomp.ContextProvider, error) { return &contextProvider{}, nil } diff --git a/internal/pkg/composable/providers/agent/agent_test.go b/internal/pkg/composable/providers/agent/agent_test.go index f3c6904b05c..cd15e8058ea 100644 --- a/internal/pkg/composable/providers/agent/agent_test.go +++ b/internal/pkg/composable/providers/agent/agent_test.go @@ -20,7 +20,7 @@ func TestContextProvider(t *testing.T) { testutils.InitStorage(t) builder, _ := composable.Providers.GetContextProvider("agent") - provider, err := builder(nil, nil) + provider, err := builder(nil, nil, true) require.NoError(t, err) comm := ctesting.NewContextComm(context.Background()) diff --git a/internal/pkg/composable/providers/docker/docker.go b/internal/pkg/composable/providers/docker/docker.go index 4bdc6d11cfe..b832cbb6c92 100644 --- a/internal/pkg/composable/providers/docker/docker.go +++ b/internal/pkg/composable/providers/docker/docker.go @@ -105,7 +105,7 @@ func (c *dynamicProvider) Run(comm composable.DynamicProviderComm) error { } // DynamicProviderBuilder builds the dynamic provider. -func DynamicProviderBuilder(logger *logger.Logger, c *config.Config) (composable.DynamicProvider, error) { +func DynamicProviderBuilder(logger *logger.Logger, c *config.Config, managed bool) (composable.DynamicProvider, error) { var cfg Config if c == nil { c = config.New() diff --git a/internal/pkg/composable/providers/env/env.go b/internal/pkg/composable/providers/env/env.go index 4c6b5911f47..b7b521c85d1 100644 --- a/internal/pkg/composable/providers/env/env.go +++ b/internal/pkg/composable/providers/env/env.go @@ -16,7 +16,7 @@ import ( ) func init() { - composable.Providers.AddContextProvider("env", ContextProviderBuilder) + _ = composable.Providers.AddContextProvider("env", ContextProviderBuilder) } type contextProvider struct{} @@ -31,7 +31,7 @@ func (*contextProvider) Run(comm corecomp.ContextProviderComm) error { } // ContextProviderBuilder builds the context provider. 
-func ContextProviderBuilder(_ *logger.Logger, _ *config.Config) (corecomp.ContextProvider, error) { +func ContextProviderBuilder(_ *logger.Logger, _ *config.Config, managed bool) (corecomp.ContextProvider, error) { return &contextProvider{}, nil } diff --git a/internal/pkg/composable/providers/env/env_test.go b/internal/pkg/composable/providers/env/env_test.go index f41f6200697..a03f37ee577 100644 --- a/internal/pkg/composable/providers/env/env_test.go +++ b/internal/pkg/composable/providers/env/env_test.go @@ -17,7 +17,7 @@ import ( func TestContextProvider(t *testing.T) { builder, _ := composable.Providers.GetContextProvider("env") - provider, err := builder(nil, nil) + provider, err := builder(nil, nil, true) require.NoError(t, err) comm := ctesting.NewContextComm(context.Background()) diff --git a/internal/pkg/composable/providers/host/host.go b/internal/pkg/composable/providers/host/host.go index 25d53430a2f..41498de79cc 100644 --- a/internal/pkg/composable/providers/host/host.go +++ b/internal/pkg/composable/providers/host/host.go @@ -24,7 +24,7 @@ import ( const DefaultCheckInterval = 5 * time.Minute func init() { - composable.Providers.AddContextProvider("host", ContextProviderBuilder) + _ = composable.Providers.AddContextProvider("host", ContextProviderBuilder) } type infoFetcher func() (map[string]interface{}, error) @@ -81,7 +81,7 @@ func (c *contextProvider) Run(comm corecomp.ContextProviderComm) error { } // ContextProviderBuilder builds the context provider. -func ContextProviderBuilder(log *logger.Logger, c *config.Config) (corecomp.ContextProvider, error) { +func ContextProviderBuilder(log *logger.Logger, c *config.Config, managed bool) (corecomp.ContextProvider, error) { p := &contextProvider{ logger: log, fetcher: getHostInfo, diff --git a/internal/pkg/composable/providers/host/host_test.go b/internal/pkg/composable/providers/host/host_test.go index 8e117fcbeb4..30b9619bfc6 100644 --- a/internal/pkg/composable/providers/host/host_test.go +++ b/internal/pkg/composable/providers/host/host_test.go @@ -33,10 +33,10 @@ func TestContextProvider(t *testing.T) { builder, _ := composable.Providers.GetContextProvider("host") log, err := logger.New("host_test", false) require.NoError(t, err) - provider, err := builder(log, c) + provider, err := builder(log, c, true) require.NoError(t, err) - hostProvider := provider.(*contextProvider) + hostProvider, _ := provider.(*contextProvider) hostProvider.fetcher = returnHostMapping() require.Equal(t, 100*time.Millisecond, hostProvider.CheckInterval) diff --git a/internal/pkg/composable/providers/kubernetes/config.go b/internal/pkg/composable/providers/kubernetes/config.go index 9bec67b66b8..4a97b417c59 100644 --- a/internal/pkg/composable/providers/kubernetes/config.go +++ b/internal/pkg/composable/providers/kubernetes/config.go @@ -7,6 +7,8 @@ package kubernetes import ( "time" + "github.com/elastic/elastic-agent-libs/config" + "github.com/elastic/elastic-agent-autodiscover/kubernetes" "github.com/elastic/elastic-agent-autodiscover/kubernetes/metadata" "github.com/elastic/elastic-agent-libs/logp" @@ -34,6 +36,9 @@ type Config struct { LabelsDedot bool `config:"labels.dedot"` AnnotationsDedot bool `config:"annotations.dedot"` + + Hints *config.C `config:"hints"` + Prefix string `config:"prefix"` } // Resources config section for resources' config blocks @@ -56,6 +61,7 @@ func (c *Config) InitDefaults() { c.LabelsDedot = true c.AnnotationsDedot = true c.AddResourceMetadata = 
metadata.GetDefaultResourceMetadataConfig() + c.Prefix = "co.elastic" } // Validate ensures correctness of config diff --git a/internal/pkg/composable/providers/kubernetes/hints.go b/internal/pkg/composable/providers/kubernetes/hints.go new file mode 100644 index 00000000000..1a779e0c2c6 --- /dev/null +++ b/internal/pkg/composable/providers/kubernetes/hints.go @@ -0,0 +1,258 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package kubernetes + +import ( + "fmt" + "regexp" + "strings" + + "github.com/elastic/elastic-agent-autodiscover/utils" + "github.com/elastic/elastic-agent-libs/logp" + "github.com/elastic/elastic-agent-libs/mapstr" +) + +const ( + integration = "package" + datastreams = "data_streams" + + host = "host" + period = "period" + timeout = "timeout" + metricspath = "metrics_path" + username = "username" + password = "password" + stream = "stream" // this is the container stream: stdout/stderr +) + +type hintsBuilder struct { + Key string + + logger *logp.Logger +} + +func (m *hintsBuilder) getIntegration(hints mapstr.M) string { + return utils.GetHintString(hints, m.Key, integration) +} + +func (m *hintsBuilder) getDataStreams(hints mapstr.M) []string { + ds := utils.GetHintAsList(hints, m.Key, datastreams) + return ds +} + +func (m *hintsBuilder) getHost(hints mapstr.M) string { + return utils.GetHintString(hints, m.Key, host) +} + +func (m *hintsBuilder) getStreamHost(hints mapstr.M, streamName string) string { + key := fmt.Sprintf("%v.%v", streamName, host) + return utils.GetHintString(hints, m.Key, key) +} + +func (m *hintsBuilder) getPeriod(hints mapstr.M) string { + return utils.GetHintString(hints, m.Key, period) +} + +func (m *hintsBuilder) getStreamPeriod(hints mapstr.M, streamName string) string { + key := fmt.Sprintf("%v.%v", streamName, period) + return utils.GetHintString(hints, m.Key, key) +} + +func (m *hintsBuilder) getTimeout(hints mapstr.M) string { + return utils.GetHintString(hints, m.Key, timeout) +} + +func (m *hintsBuilder) getStreamTimeout(hints mapstr.M, streamName string) string { + key := fmt.Sprintf("%v.%v", streamName, timeout) + return utils.GetHintString(hints, m.Key, key) +} + +func (m *hintsBuilder) getMetricspath(hints mapstr.M) string { + return utils.GetHintString(hints, m.Key, metricspath) +} + +func (m *hintsBuilder) getStreamMetricspath(hints mapstr.M, streamName string) string { + key := fmt.Sprintf("%v.%v", streamName, metricspath) + return utils.GetHintString(hints, m.Key, key) +} + +func (m *hintsBuilder) getUsername(hints mapstr.M) string { + return utils.GetHintString(hints, m.Key, username) +} + +func (m *hintsBuilder) getStreamUsername(hints mapstr.M, streamName string) string { + key := fmt.Sprintf("%v.%v", streamName, username) + return utils.GetHintString(hints, m.Key, key) +} + +func (m *hintsBuilder) getPassword(hints mapstr.M) string { + return utils.GetHintString(hints, m.Key, password) +} + +func (m *hintsBuilder) getStreamPassword(hints mapstr.M, streamName string) string { + key := fmt.Sprintf("%v.%v", streamName, password) + return utils.GetHintString(hints, m.Key, key) +} + +func (m *hintsBuilder) getContainerStream(hints mapstr.M) string { + return utils.GetHintString(hints, m.Key, stream) +} + +func (m *hintsBuilder) getStreamContainerStream(hints mapstr.M, streamName string) string { + key := 
fmt.Sprintf("%v.%v", streamName, stream) + return utils.GetHintString(hints, m.Key, key) +} + +// Replace hints like `'${kubernetes.pod.ip}:6379'` with the actual values from the resource metadata. +// So if you replace the `${kubernetes.pod.ip}` part with the value from the Pod's metadata +// you end up with sth like `10.28.90.345:6379` +func (m *hintsBuilder) getFromMeta(value string, kubeMeta mapstr.M) string { + if value == "" { + return "" + } + r := regexp.MustCompile(`\${([^{}]+)}`) + matches := r.FindAllString(value, -1) + for _, match := range matches { + key := strings.TrimSuffix(strings.TrimPrefix(match, "${kubernetes."), "}") + val, err := kubeMeta.GetValue(key) + if err != nil { + m.logger.Debugf("cannot retrieve key from k8smeta: %v", key) + return "" + } + hintVal, ok := val.(string) + if !ok { + m.logger.Debugf("cannot convert value into string: %v", val) + return "" + } + value = strings.Replace(value, match, hintVal, -1) + } + return value +} + +// GenerateHintsMapping gets a hint's map extracted from the annotations and constructs the final +// hints' mapping to be emitted. +func GenerateHintsMapping(hints mapstr.M, kubeMeta mapstr.M, logger *logp.Logger, containerID string) mapstr.M { + builder := hintsBuilder{ + Key: "hints", // consider doing it a configurable, + logger: logger, + } + + hintsMapping := mapstr.M{} + integration := builder.getIntegration(hints) + if integration == "" { + return hintsMapping + } + integrationHints := mapstr.M{ + "enabled": true, + } + + if containerID != "" { + _, _ = hintsMapping.Put("container_id", containerID) + // Add the default container log fallback to enable any template which defines + // a log input with a `"${kubernetes.hints.container_logs.enabled} == true"` condition + _, _ = integrationHints.Put("container_logs.enabled", true) + } + + // TODO: add support for processors + // Processors should be data_stream specific. 
+ // Add a basic processor as a base like: + //- add_fields: + // target: kubernetes + // fields: + // hints: true + // Blocked by https://github.com/elastic/elastic-agent/issues/735 + + integrationHost := builder.getFromMeta(builder.getHost(hints), kubeMeta) + if integrationHost != "" { + _, _ = integrationHints.Put(host, integrationHost) + } + integrationPeriod := builder.getFromMeta(builder.getPeriod(hints), kubeMeta) + if integrationPeriod != "" { + _, _ = integrationHints.Put(period, integrationPeriod) + } + integrationTimeout := builder.getFromMeta(builder.getTimeout(hints), kubeMeta) + if integrationTimeout != "" { + _, _ = integrationHints.Put(timeout, integrationTimeout) + } + integrationMetricsPath := builder.getFromMeta(builder.getMetricspath(hints), kubeMeta) + if integrationMetricsPath != "" { + _, _ = integrationHints.Put(metricspath, integrationMetricsPath) + } + integrationUsername := builder.getFromMeta(builder.getUsername(hints), kubeMeta) + if integrationUsername != "" { + _, _ = integrationHints.Put(username, integrationUsername) + } + integrationPassword := builder.getFromMeta(builder.getPassword(hints), kubeMeta) + if integrationPassword != "" { + _, _ = integrationHints.Put(password, integrationPassword) + } + integrationContainerStream := builder.getFromMeta(builder.getContainerStream(hints), kubeMeta) + if integrationContainerStream != "" { + _, _ = integrationHints.Put(stream, integrationContainerStream) + } + + dataStreams := builder.getDataStreams(hints) + for _, dataStream := range dataStreams { + streamHints := mapstr.M{ + "enabled": true, + } + if integrationPeriod != "" { + _, _ = streamHints.Put(period, integrationPeriod) + } + if integrationHost != "" { + _, _ = streamHints.Put(host, integrationHost) + } + if integrationTimeout != "" { + _, _ = streamHints.Put(timeout, integrationTimeout) + } + if integrationMetricsPath != "" { + _, _ = streamHints.Put(metricspath, integrationMetricsPath) + } + if integrationUsername != "" { + _, _ = streamHints.Put(username, integrationUsername) + } + if integrationPassword != "" { + _, _ = streamHints.Put(password, integrationPassword) + } + if integrationContainerStream != "" { + _, _ = streamHints.Put(stream, integrationContainerStream) + } + + streamPeriod := builder.getFromMeta(builder.getStreamPeriod(hints, dataStream), kubeMeta) + if streamPeriod != "" { + _, _ = streamHints.Put(period, streamPeriod) + } + streamHost := builder.getFromMeta(builder.getStreamHost(hints, dataStream), kubeMeta) + if streamHost != "" { + _, _ = streamHints.Put(host, streamHost) + } + streamTimeout := builder.getFromMeta(builder.getStreamTimeout(hints, dataStream), kubeMeta) + if streamTimeout != "" { + _, _ = streamHints.Put(timeout, streamTimeout) + } + streamMetricsPath := builder.getFromMeta(builder.getStreamMetricspath(hints, dataStream), kubeMeta) + if streamMetricsPath != "" { + _, _ = streamHints.Put(metricspath, streamMetricsPath) + } + streamUsername := builder.getFromMeta(builder.getStreamUsername(hints, dataStream), kubeMeta) + if streamUsername != "" { + _, _ = streamHints.Put(username, streamUsername) + } + streamPassword := builder.getFromMeta(builder.getStreamPassword(hints, dataStream), kubeMeta) + if streamPassword != "" { + _, _ = streamHints.Put(password, streamPassword) + } + streamContainerStream := builder.getFromMeta(builder.getStreamContainerStream(hints, dataStream), kubeMeta) + if streamContainerStream != "" { + _, _ = streamHints.Put(stream, streamContainerStream) + } + _, _ = 
integrationHints.Put(dataStream, streamHints) + + } + + _, _ = hintsMapping.Put(integration, integrationHints) + + return hintsMapping +} diff --git a/internal/pkg/composable/providers/kubernetes/hints_test.go b/internal/pkg/composable/providers/kubernetes/hints_test.go new file mode 100644 index 00000000000..e23296d09a7 --- /dev/null +++ b/internal/pkg/composable/providers/kubernetes/hints_test.go @@ -0,0 +1,301 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package kubernetes + +import ( + "testing" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + + "github.com/stretchr/testify/assert" + + "github.com/elastic/elastic-agent-autodiscover/kubernetes" + "github.com/elastic/elastic-agent-libs/mapstr" +) + +func TestGenerateHintsMapping(t *testing.T) { + logger := getLogger() + pod := &kubernetes.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testpod", + UID: types.UID(uid), + Namespace: "testns", + Labels: map[string]string{ + "foo": "bar", + "with-dash": "dash-value", + "with/slash": "some/path", + }, + Annotations: map[string]string{ + "app": "production", + }, + }, + TypeMeta: metav1.TypeMeta{ + Kind: "Pod", + APIVersion: "v1", + }, + Spec: kubernetes.PodSpec{ + NodeName: "testnode", + }, + Status: kubernetes.PodStatus{PodIP: "127.0.0.5"}, + } + + mapping := map[string]interface{}{ + "namespace": pod.GetNamespace(), + "pod": mapstr.M{ + "uid": string(pod.GetUID()), + "name": pod.GetName(), + "ip": pod.Status.PodIP, + }, + "namespace_annotations": mapstr.M{ + "nsa": "nsb", + }, + "labels": mapstr.M{ + "foo": "bar", + "with-dash": "dash-value", + "with/slash": "some/path", + }, + "annotations": mapstr.M{ + "app": "production", + }, + } + hints := mapstr.M{ + "hints": mapstr.M{ + "data_streams": "info, key, keyspace", + "host": "${kubernetes.pod.ip}:6379", + "info": mapstr.M{"period": "1m", "timeout": "41s"}, + "key": mapstr.M{"period": "10m"}, + "package": "redis", + "password": "password", + "username": "username", + "metrics_path": "/metrics", + "timeout": "42s", + "period": "42s", + }, + } + + expected := mapstr.M{ + "redis": mapstr.M{ + "enabled": true, + "host": "127.0.0.5:6379", + "metrics_path": "/metrics", + "username": "username", + "password": "password", + "timeout": "42s", + "period": "42s", + "info": mapstr.M{ + "enabled": true, + "host": "127.0.0.5:6379", + "period": "1m", + "metrics_path": "/metrics", + "username": "username", + "password": "password", + "timeout": "41s", + }, "key": mapstr.M{ + "enabled": true, + "host": "127.0.0.5:6379", + "period": "10m", + "metrics_path": "/metrics", + "username": "username", + "password": "password", + "timeout": "42s", + }, "keyspace": mapstr.M{ + "enabled": true, + "host": "127.0.0.5:6379", + "period": "42s", + "metrics_path": "/metrics", + "username": "username", + "password": "password", + "timeout": "42s", + }, + }, + } + + hintsMapping := GenerateHintsMapping(hints, mapping, logger, "") + + assert.Equal(t, expected, hintsMapping) +} + +func TestGenerateHintsMappingWithContainerID(t *testing.T) { + logger := getLogger() + pod := &kubernetes.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testpod", + UID: types.UID(uid), + Namespace: "testns", + Labels: map[string]string{ + "foo": "bar", + "with-dash": "dash-value", + "with/slash": "some/path", + }, + Annotations: map[string]string{ 
+ "app": "production", + }, + }, + TypeMeta: metav1.TypeMeta{ + Kind: "Pod", + APIVersion: "v1", + }, + Spec: kubernetes.PodSpec{ + NodeName: "testnode", + }, + Status: kubernetes.PodStatus{PodIP: "127.0.0.5"}, + } + + mapping := map[string]interface{}{ + "namespace": pod.GetNamespace(), + "pod": mapstr.M{ + "uid": string(pod.GetUID()), + "name": pod.GetName(), + "ip": pod.Status.PodIP, + }, + "namespace_annotations": mapstr.M{ + "nsa": "nsb", + }, + "labels": mapstr.M{ + "foo": "bar", + "with-dash": "dash-value", + "with/slash": "some/path", + }, + "annotations": mapstr.M{ + "app": "production", + }, + } + hints := mapstr.M{ + "hints": mapstr.M{ + "data_streams": "info, key, keyspace", + "host": "${kubernetes.pod.ip}:6379", + "info": mapstr.M{"period": "1m", "timeout": "41s"}, + "key": mapstr.M{"period": "10m"}, + "package": "redis", + "password": "password", + "username": "username", + "metrics_path": "/metrics", + "timeout": "42s", + "period": "42s", + }, + } + + expected := mapstr.M{ + "container_id": "asdfghjklqwerty", + "redis": mapstr.M{ + "container_logs": mapstr.M{ + "enabled": true, + }, + "enabled": true, + "host": "127.0.0.5:6379", + "metrics_path": "/metrics", + "username": "username", + "password": "password", + "timeout": "42s", + "period": "42s", + "info": mapstr.M{ + "enabled": true, + "host": "127.0.0.5:6379", + "period": "1m", + "metrics_path": "/metrics", + "username": "username", + "password": "password", + "timeout": "41s", + }, "key": mapstr.M{ + "enabled": true, + "host": "127.0.0.5:6379", + "period": "10m", + "metrics_path": "/metrics", + "username": "username", + "password": "password", + "timeout": "42s", + }, "keyspace": mapstr.M{ + "enabled": true, + "host": "127.0.0.5:6379", + "period": "42s", + "metrics_path": "/metrics", + "username": "username", + "password": "password", + "timeout": "42s", + }, + }, + } + + hintsMapping := GenerateHintsMapping(hints, mapping, logger, "asdfghjklqwerty") + + assert.Equal(t, expected, hintsMapping) +} + +func TestGenerateHintsMappingWithLogStream(t *testing.T) { + logger := getLogger() + pod := &kubernetes.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testpod", + UID: types.UID(uid), + Namespace: "testns", + Labels: map[string]string{ + "foo": "bar", + "with-dash": "dash-value", + "with/slash": "some/path", + }, + Annotations: map[string]string{ + "app": "production", + }, + }, + TypeMeta: metav1.TypeMeta{ + Kind: "Pod", + APIVersion: "v1", + }, + Spec: kubernetes.PodSpec{ + NodeName: "testnode", + }, + Status: kubernetes.PodStatus{PodIP: "127.0.0.5"}, + } + + mapping := map[string]interface{}{ + "namespace": pod.GetNamespace(), + "pod": mapstr.M{ + "uid": string(pod.GetUID()), + "name": pod.GetName(), + "ip": pod.Status.PodIP, + }, + "namespace_annotations": mapstr.M{ + "nsa": "nsb", + }, + "labels": mapstr.M{ + "foo": "bar", + "with-dash": "dash-value", + "with/slash": "some/path", + }, + "annotations": mapstr.M{ + "app": "production", + }, + } + hints := mapstr.M{ + "hints": mapstr.M{ + "data_streams": "access, error", + "access": mapstr.M{"stream": "stdout"}, + "error": mapstr.M{"stream": "stderr"}, + "package": "apache", + }, + } + + expected := mapstr.M{ + "container_id": "asdfghjkl", + "apache": mapstr.M{ + "enabled": true, + "container_logs": mapstr.M{ + "enabled": true, + }, + "access": mapstr.M{ + "enabled": true, + "stream": "stdout", + }, "error": mapstr.M{ + "enabled": true, + "stream": "stderr", + }, + }, + } + + hintsMapping := GenerateHintsMapping(hints, mapping, logger, "asdfghjkl") + + assert.Equal(t, 
expected, hintsMapping) +} diff --git a/internal/pkg/composable/providers/kubernetes/kubernetes.go b/internal/pkg/composable/providers/kubernetes/kubernetes.go index 91367c5252f..ab4a14d6a61 100644 --- a/internal/pkg/composable/providers/kubernetes/kubernetes.go +++ b/internal/pkg/composable/providers/kubernetes/kubernetes.go @@ -7,9 +7,11 @@ package kubernetes import ( "fmt" + "github.com/elastic/elastic-agent-autodiscover/kubernetes" + "github.com/elastic/elastic-agent-libs/logp" + k8s "k8s.io/client-go/kubernetes" - "github.com/elastic/elastic-agent-autodiscover/kubernetes" "github.com/elastic/elastic-agent/internal/pkg/agent/errors" "github.com/elastic/elastic-agent/internal/pkg/composable" "github.com/elastic/elastic-agent/internal/pkg/config" @@ -34,12 +36,13 @@ func init() { } type dynamicProvider struct { - logger *logger.Logger - config *Config + logger *logger.Logger + config *Config + managed bool } // DynamicProviderBuilder builds the dynamic provider. -func DynamicProviderBuilder(logger *logger.Logger, c *config.Config) (composable.DynamicProvider, error) { +func DynamicProviderBuilder(logger *logger.Logger, c *config.Config, managed bool) (composable.DynamicProvider, error) { var cfg Config if c == nil { c = config.New() @@ -49,11 +52,15 @@ func DynamicProviderBuilder(logger *logger.Logger, c *config.Config) (composable return nil, errors.New(err, "failed to unpack configuration") } - return &dynamicProvider{logger, &cfg}, nil + return &dynamicProvider{logger, &cfg, managed}, nil } // Run runs the kubernetes context provider. func (p *dynamicProvider) Run(comm composable.DynamicProviderComm) error { + if p.config.Hints.Enabled() { + betalogger := logp.NewLogger("cfgwarn") + betalogger.Warnf("BETA: Hints' feature is beta.") + } if p.config.Resources.Pod.Enabled { err := p.watchResource(comm, "pod") if err != nil { @@ -139,19 +146,19 @@ func (p *dynamicProvider) newEventer( client k8s.Interface) (Eventer, error) { switch resourceType { case "pod": - eventer, err := NewPodEventer(comm, p.config, p.logger, client, p.config.Scope) + eventer, err := NewPodEventer(comm, p.config, p.logger, client, p.config.Scope, p.managed) if err != nil { return nil, err } return eventer, nil case nodeScope: - eventer, err := NewNodeEventer(comm, p.config, p.logger, client, p.config.Scope) + eventer, err := NewNodeEventer(comm, p.config, p.logger, client, p.config.Scope, p.managed) if err != nil { return nil, err } return eventer, nil case "service": - eventer, err := NewServiceEventer(comm, p.config, p.logger, client, p.config.Scope) + eventer, err := NewServiceEventer(comm, p.config, p.logger, client, p.config.Scope, p.managed) if err != nil { return nil, err } diff --git a/internal/pkg/composable/providers/kubernetes/node.go b/internal/pkg/composable/providers/kubernetes/node.go index a1539afb9c1..0e5aebc8931 100644 --- a/internal/pkg/composable/providers/kubernetes/node.go +++ b/internal/pkg/composable/providers/kubernetes/node.go @@ -43,7 +43,8 @@ func NewNodeEventer( cfg *Config, logger *logp.Logger, client k8s.Interface, - scope string) (Eventer, error) { + scope string, + managed bool) (Eventer, error) { watcher, err := kubernetes.NewNamedWatcher("agent-node", client, &kubernetes.Node{}, kubernetes.WatchOptions{ SyncTimeout: cfg.SyncPeriod, Node: cfg.Node, diff --git a/internal/pkg/composable/providers/kubernetes/node_test.go b/internal/pkg/composable/providers/kubernetes/node_test.go index ab19e7d2ce2..8415304b00b 100644 --- 
a/internal/pkg/composable/providers/kubernetes/node_test.go +++ b/internal/pkg/composable/providers/kubernetes/node_test.go @@ -93,11 +93,6 @@ func TestGenerateNodeData(t *testing.T) { type nodeMeta struct{} // Generate generates node metadata from a resource object -// Metadata map is in the following form: -// { -// "kubernetes": {}, -// "some.ecs.field": "asdf" -// } // All Kubernetes fields that need to be stored under kubernetes. prefix are populated by // GenerateK8s method while fields that are part of ECS are generated by GenerateECS method func (n *nodeMeta) Generate(obj kubernetes.Resource, opts ...metadata.FieldOptions) mapstr.M { diff --git a/internal/pkg/composable/providers/kubernetes/pod.go b/internal/pkg/composable/providers/kubernetes/pod.go index 034df3c7a72..53814a182fa 100644 --- a/internal/pkg/composable/providers/kubernetes/pod.go +++ b/internal/pkg/composable/providers/kubernetes/pod.go @@ -9,6 +9,8 @@ import ( "sync" "time" + "github.com/elastic/elastic-agent-autodiscover/utils" + "github.com/elastic/elastic-agent-autodiscover/kubernetes" "github.com/elastic/elastic-agent-autodiscover/kubernetes/metadata" c "github.com/elastic/elastic-agent-libs/config" @@ -31,6 +33,7 @@ type pod struct { config *Config logger *logp.Logger scope string + managed bool cleanupTimeout time.Duration // Mutex used by configuration updates not triggered by the main watcher, @@ -51,7 +54,8 @@ func NewPodEventer( cfg *Config, logger *logp.Logger, client k8s.Interface, - scope string) (Eventer, error) { + scope string, + managed bool) (Eventer, error) { watcher, err := kubernetes.NewNamedWatcher("agent-pod", client, &kubernetes.Pod{}, kubernetes.WatchOptions{ SyncTimeout: cfg.SyncPeriod, Node: cfg.Node, @@ -95,6 +99,7 @@ func NewPodEventer( watcher: watcher, nodeWatcher: nodeWatcher, namespaceWatcher: namespaceWatcher, + managed: managed, } watcher.AddEventHandler(p) @@ -149,10 +154,32 @@ func (p *pod) emitRunning(pod *kubernetes.Pod) { data := generatePodData(pod, p.metagen, namespaceAnnotations) data.mapping["scope"] = p.scope - // Emit the pod - // We emit Pod + containers to ensure that configs matching Pod only - // get Pod metadata (not specific to any container) - _ = p.comm.AddOrUpdate(data.uid, PodPriority, data.mapping, data.processors) + + if p.config.Hints.Enabled() { // This is "hints based autodiscovery flow" + if !p.managed { + if ann, ok := data.mapping["annotations"]; ok { + annotations, _ := ann.(mapstr.M) + hints := utils.GenerateHints(annotations, "", p.config.Prefix) + if len(hints) > 0 { + p.logger.Errorf("Extracted hints are :%v", hints) + hintsMapping := GenerateHintsMapping(hints, data.mapping, p.logger, "") + p.logger.Errorf("Generated hints mappings are :%v", hintsMapping) + _ = p.comm.AddOrUpdate( + data.uid, + PodPriority, + map[string]interface{}{"hints": hintsMapping}, + data.processors, + ) + } + } + } + } else { // This is the "template-based autodiscovery" flow + // emit normal mapping to be used in dynamic variable resolution + // Emit the pod + // We emit Pod + containers to ensure that configs matching Pod only + // get Pod metadata (not specific to any container) + _ = p.comm.AddOrUpdate(data.uid, PodPriority, data.mapping, data.processors) + } // Emit all containers in the pod // We should deal with init containers stopping after initialization @@ -160,7 +187,7 @@ func (p *pod) emitRunning(pod *kubernetes.Pod) { } func (p *pod) emitContainers(pod *kubernetes.Pod, namespaceAnnotations mapstr.M) { - 
generateContainerData(p.comm, pod, p.metagen, namespaceAnnotations) + generateContainerData(p.comm, pod, p.metagen, namespaceAnnotations, p.logger, p.managed, p.config) } func (p *pod) emitStopped(pod *kubernetes.Pod) { @@ -265,7 +292,10 @@ func generateContainerData( comm composable.DynamicProviderComm, pod *kubernetes.Pod, kubeMetaGen metadata.MetaGen, - namespaceAnnotations mapstr.M) { + namespaceAnnotations mapstr.M, + logger *logp.Logger, + managed bool, + config *Config) { containers := kubernetes.GetContainersInPod(pod) @@ -344,7 +374,28 @@ func generateContainerData( _, _ = containerMeta.Put("port", fmt.Sprintf("%v", port.ContainerPort)) _, _ = containerMeta.Put("port_name", port.Name) k8sMapping["container"] = containerMeta - _ = comm.AddOrUpdate(eventID, ContainerPriority, k8sMapping, processors) + + if config.Hints.Enabled() { // This is "hints based autodiscovery flow" + if !managed { + if ann, ok := k8sMapping["annotations"]; ok { + annotations, _ := ann.(mapstr.M) + hints := utils.GenerateHints(annotations, "", config.Prefix) + if len(hints) > 0 { + logger.Debugf("Extracted hints are :%v", hints) + hintsMapping := GenerateHintsMapping(hints, k8sMapping, logger, c.ID) + logger.Debugf("Generated hints mappings are :%v", hintsMapping) + _ = comm.AddOrUpdate( + eventID, + PodPriority, + map[string]interface{}{"hints": hintsMapping}, + processors, + ) + } + } + } + } else { // This is the "template-based autodiscovery" flow + _ = comm.AddOrUpdate(eventID, ContainerPriority, k8sMapping, processors) + } } } else { k8sMapping["container"] = containerMeta diff --git a/internal/pkg/composable/providers/kubernetes/pod_test.go b/internal/pkg/composable/providers/kubernetes/pod_test.go index 95361fd2ce0..7409ad1a3ea 100644 --- a/internal/pkg/composable/providers/kubernetes/pod_test.go +++ b/internal/pkg/composable/providers/kubernetes/pod_test.go @@ -9,17 +9,28 @@ import ( "fmt" "testing" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "github.com/stretchr/testify/assert" + "github.com/elastic/elastic-agent-libs/logp" + "github.com/elastic/elastic-agent/pkg/core/logger" + "github.com/elastic/elastic-agent-autodiscover/kubernetes" "github.com/elastic/elastic-agent-autodiscover/kubernetes/metadata" "github.com/elastic/elastic-agent-libs/mapstr" - - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" + "github.com/elastic/elastic-agent/internal/pkg/config" ) +func getLogger() *logger.Logger { + loggerCfg := logger.DefaultLoggingConfig() + loggerCfg.Level = logp.ErrorLevel + l, _ := logger.NewFromConfig("", loggerCfg, false) + return l +} + func TestGeneratePodData(t *testing.T) { pod := &kubernetes.Pod{ ObjectMeta: metav1.ObjectMeta{ @@ -153,13 +164,21 @@ func TestGenerateContainerPodData(t *testing.T) { context.TODO(), providerDataChan, } + logger := getLogger() + var cfg Config + c := config.New() + _ = c.Unpack(&cfg) generateContainerData( &comm, pod, &podMeta{}, mapstr.M{ "nsa": "nsb", - }) + }, + logger, + true, + &cfg, + ) mapping := map[string]interface{}{ "namespace": pod.GetNamespace(), @@ -274,13 +293,21 @@ func TestEphemeralContainers(t *testing.T) { context.TODO(), providerDataChan, } + + logger := getLogger() + var cfg Config + c := config.New() + _ = c.Unpack(&cfg) generateContainerData( &comm, pod, &podMeta{}, mapstr.M{ "nsa": "nsb", - }) + }, + logger, + true, + &cfg) mapping := map[string]interface{}{ 
"namespace": pod.GetNamespace(), @@ -366,11 +393,6 @@ func (t *MockDynamicComm) Remove(id string) { type podMeta struct{} // Generate generates pod metadata from a resource object -// Metadata map is in the following form: -// { -// "kubernetes": {}, -// "some.ecs.field": "asdf" -// } // All Kubernetes fields that need to be stored under kubernetes. prefix are populated by // GenerateK8s method while fields that are part of ECS are generated by GenerateECS method func (p *podMeta) Generate(obj kubernetes.Resource, opts ...metadata.FieldOptions) mapstr.M { diff --git a/internal/pkg/composable/providers/kubernetes/service.go b/internal/pkg/composable/providers/kubernetes/service.go index 49c20627734..4060c12e646 100644 --- a/internal/pkg/composable/providers/kubernetes/service.go +++ b/internal/pkg/composable/providers/kubernetes/service.go @@ -43,7 +43,8 @@ func NewServiceEventer( cfg *Config, logger *logp.Logger, client k8s.Interface, - scope string) (Eventer, error) { + scope string, + managed bool) (Eventer, error) { watcher, err := kubernetes.NewNamedWatcher("agent-service", client, &kubernetes.Service{}, kubernetes.WatchOptions{ SyncTimeout: cfg.SyncPeriod, Node: cfg.Node, diff --git a/internal/pkg/composable/providers/kubernetes/service_test.go b/internal/pkg/composable/providers/kubernetes/service_test.go index 69e945ee1cd..1943e3cfcdb 100644 --- a/internal/pkg/composable/providers/kubernetes/service_test.go +++ b/internal/pkg/composable/providers/kubernetes/service_test.go @@ -107,11 +107,6 @@ func TestGenerateServiceData(t *testing.T) { type svcMeta struct{} // Generate generates svc metadata from a resource object -// Metadata map is in the following form: -// { -// "kubernetes": {}, -// "some.ecs.field": "asdf" -// } // All Kubernetes fields that need to be stored under kubernetes. prefix are populated by // GenerateK8s method while fields that are part of ECS are generated by GenerateECS method func (s *svcMeta) Generate(obj kubernetes.Resource, opts ...metadata.FieldOptions) mapstr.M { diff --git a/internal/pkg/composable/providers/kubernetesleaderelection/kubernetes_leaderelection.go b/internal/pkg/composable/providers/kubernetesleaderelection/kubernetes_leaderelection.go index 410e13ec77d..0276a4a6e0c 100644 --- a/internal/pkg/composable/providers/kubernetesleaderelection/kubernetes_leaderelection.go +++ b/internal/pkg/composable/providers/kubernetesleaderelection/kubernetes_leaderelection.go @@ -35,7 +35,7 @@ type contextProvider struct { } // ContextProviderBuilder builds the provider. -func ContextProviderBuilder(logger *logger.Logger, c *config.Config) (corecomp.ContextProvider, error) { +func ContextProviderBuilder(logger *logger.Logger, c *config.Config, managed bool) (corecomp.ContextProvider, error) { var cfg Config if c == nil { c = config.New() diff --git a/internal/pkg/composable/providers/kubernetessecrets/kubernetes_secrets.go b/internal/pkg/composable/providers/kubernetessecrets/kubernetes_secrets.go index 0bc560295ed..e560b08a599 100644 --- a/internal/pkg/composable/providers/kubernetessecrets/kubernetes_secrets.go +++ b/internal/pkg/composable/providers/kubernetessecrets/kubernetes_secrets.go @@ -34,7 +34,7 @@ type contextProviderK8sSecrets struct { } // ContextProviderBuilder builds the context provider. 
-func ContextProviderBuilder(logger *logger.Logger, c *config.Config) (corecomp.ContextProvider, error) { +func ContextProviderBuilder(logger *logger.Logger, c *config.Config, managed bool) (corecomp.ContextProvider, error) { var cfg Config if c == nil { c = config.New() diff --git a/internal/pkg/composable/providers/kubernetessecrets/kubernetes_secrets_test.go b/internal/pkg/composable/providers/kubernetessecrets/kubernetes_secrets_test.go index 4c80800a59b..079d7b4becc 100644 --- a/internal/pkg/composable/providers/kubernetessecrets/kubernetes_secrets_test.go +++ b/internal/pkg/composable/providers/kubernetessecrets/kubernetes_secrets_test.go @@ -49,7 +49,7 @@ func Test_K8sSecretsProvider_Fetch(t *testing.T) { cfg, err := config.NewConfigFrom(map[string]string{"a": "b"}) require.NoError(t, err) - p, err := ContextProviderBuilder(logger, cfg) + p, err := ContextProviderBuilder(logger, cfg, true) require.NoError(t, err) fp, _ := p.(corecomp.FetchContextProvider) @@ -86,7 +86,7 @@ func Test_K8sSecretsProvider_FetchWrongSecret(t *testing.T) { cfg, err := config.NewConfigFrom(map[string]string{"a": "b"}) require.NoError(t, err) - p, err := ContextProviderBuilder(logger, cfg) + p, err := ContextProviderBuilder(logger, cfg, true) require.NoError(t, err) fp, _ := p.(corecomp.FetchContextProvider) diff --git a/internal/pkg/composable/providers/local/local.go b/internal/pkg/composable/providers/local/local.go index 9c611ecbd13..2078dcf40ed 100644 --- a/internal/pkg/composable/providers/local/local.go +++ b/internal/pkg/composable/providers/local/local.go @@ -15,7 +15,7 @@ import ( ) func init() { - composable.Providers.AddContextProvider("local", ContextProviderBuilder) + _ = composable.Providers.AddContextProvider("local", ContextProviderBuilder) } type contextProvider struct { @@ -32,7 +32,7 @@ func (c *contextProvider) Run(comm corecomp.ContextProviderComm) error { } // ContextProviderBuilder builds the context provider. -func ContextProviderBuilder(_ *logger.Logger, c *config.Config) (corecomp.ContextProvider, error) { +func ContextProviderBuilder(_ *logger.Logger, c *config.Config, managed bool) (corecomp.ContextProvider, error) { p := &contextProvider{} if c != nil { err := c.Unpack(p) diff --git a/internal/pkg/composable/providers/local/local_test.go b/internal/pkg/composable/providers/local/local_test.go index 6afe29251d5..dfec629b88a 100644 --- a/internal/pkg/composable/providers/local/local_test.go +++ b/internal/pkg/composable/providers/local/local_test.go @@ -26,7 +26,7 @@ func TestContextProvider(t *testing.T) { }) require.NoError(t, err) builder, _ := composable.Providers.GetContextProvider("local") - provider, err := builder(nil, cfg) + provider, err := builder(nil, cfg, true) require.NoError(t, err) comm := ctesting.NewContextComm(context.Background()) diff --git a/internal/pkg/composable/providers/localdynamic/localdynamic.go b/internal/pkg/composable/providers/localdynamic/localdynamic.go index f4f99ca4030..39a233d72da 100644 --- a/internal/pkg/composable/providers/localdynamic/localdynamic.go +++ b/internal/pkg/composable/providers/localdynamic/localdynamic.go @@ -18,7 +18,7 @@ import ( const ItemPriority = 0 func init() { - composable.Providers.AddDynamicProvider("local_dynamic", DynamicProviderBuilder) + _ = composable.Providers.AddDynamicProvider("local_dynamic", DynamicProviderBuilder) } type dynamicItem struct { @@ -41,7 +41,7 @@ func (c *dynamicProvider) Run(comm composable.DynamicProviderComm) error { } // DynamicProviderBuilder builds the dynamic provider. 
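+// NOTE: dynamic provider builders take the managed flag as well; this
+// local_dynamic provider just unpacks its config and ignores it. A sketch
+// of the (assumed, not shown in this patch) standalone config it consumes:
+//
+//	providers:
+//	  local_dynamic:
+//	    items:
+//	      - vars:
+//	          key: value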
-func DynamicProviderBuilder(_ *logger.Logger, c *config.Config) (composable.DynamicProvider, error) { +func DynamicProviderBuilder(_ *logger.Logger, c *config.Config, managed bool) (composable.DynamicProvider, error) { p := &dynamicProvider{} if c != nil { err := c.Unpack(p) diff --git a/internal/pkg/composable/providers/localdynamic/localdynamic_test.go b/internal/pkg/composable/providers/localdynamic/localdynamic_test.go index a20b37852d9..8cc0a44ccd7 100644 --- a/internal/pkg/composable/providers/localdynamic/localdynamic_test.go +++ b/internal/pkg/composable/providers/localdynamic/localdynamic_test.go @@ -60,7 +60,7 @@ func TestContextProvider(t *testing.T) { }) require.NoError(t, err) builder, _ := composable.Providers.GetDynamicProvider("local_dynamic") - provider, err := builder(nil, cfg) + provider, err := builder(nil, cfg, true) require.NoError(t, err) comm := ctesting.NewDynamicComm(context.Background()) diff --git a/internal/pkg/composable/providers/path/path.go b/internal/pkg/composable/providers/path/path.go index 455f46d2b28..f0062d19b57 100644 --- a/internal/pkg/composable/providers/path/path.go +++ b/internal/pkg/composable/providers/path/path.go @@ -14,7 +14,7 @@ import ( ) func init() { - composable.Providers.AddContextProvider("path", ContextProviderBuilder) + _ = composable.Providers.AddContextProvider("path", ContextProviderBuilder) } type contextProvider struct{} @@ -34,6 +34,6 @@ func (*contextProvider) Run(comm corecomp.ContextProviderComm) error { } // ContextProviderBuilder builds the context provider. -func ContextProviderBuilder(_ *logger.Logger, _ *config.Config) (corecomp.ContextProvider, error) { +func ContextProviderBuilder(_ *logger.Logger, _ *config.Config, managed bool) (corecomp.ContextProvider, error) { return &contextProvider{}, nil } diff --git a/internal/pkg/composable/providers/path/path_test.go b/internal/pkg/composable/providers/path/path_test.go index 14f263e56db..094865d3fbd 100644 --- a/internal/pkg/composable/providers/path/path_test.go +++ b/internal/pkg/composable/providers/path/path_test.go @@ -18,7 +18,7 @@ import ( func TestContextProvider(t *testing.T) { builder, _ := composable.Providers.GetContextProvider("path") - provider, err := builder(nil, nil) + provider, err := builder(nil, nil, true) require.NoError(t, err) comm := ctesting.NewContextComm(context.Background()) diff --git a/internal/pkg/composable/testing/dynamic.go b/internal/pkg/composable/testing/dynamic.go index bfa48dff57d..99b499835cd 100644 --- a/internal/pkg/composable/testing/dynamic.go +++ b/internal/pkg/composable/testing/dynamic.go @@ -81,6 +81,7 @@ func (t *DynamicComm) Previous(id string) (DynamicState, bool) { return prev, ok } +//nolint:prealloc,goimports,nolintlint // false positive // PreviousIDs returns the previous set mapping ID. func (t *DynamicComm) PreviousIDs() []string { t.lock.Lock() @@ -100,6 +101,7 @@ func (t *DynamicComm) Current(id string) (DynamicState, bool) { return curr, ok } +//nolint:prealloc,goimports,nolintlint // false positive // CurrentIDs returns the current set mapping ID. 
func (t *DynamicComm) CurrentIDs() []string { t.lock.Lock() From 558380bf1c02b79ba8c5aca7a74fc49250a1854d Mon Sep 17 00:00:00 2001 From: Victor Martinez Date: Wed, 14 Sep 2022 10:27:45 +0100 Subject: [PATCH 121/180] ci: increase timeout (#1190) --- .ci/Jenkinsfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.ci/Jenkinsfile b/.ci/Jenkinsfile index e1c0de3a2c6..9fb7021784a 100644 --- a/.ci/Jenkinsfile +++ b/.ci/Jenkinsfile @@ -19,7 +19,7 @@ pipeline { DEVELOPER_MODE=true } options { - timeout(time: 2, unit: 'HOURS') + timeout(time: 3, unit: 'HOURS') buildDiscarder(logRotator(numToKeepStr: '20', artifactNumToKeepStr: '20', daysToKeepStr: '30')) timestamps() ansiColor('xterm') From 332d08d726e59bad60020ef74f8aa116cf67e4f7 Mon Sep 17 00:00:00 2001 From: Andrew Gizas Date: Wed, 14 Sep 2022 12:49:08 +0300 Subject: [PATCH 122/180] Fixing condition for PR creation (#1188) --- deploy/kubernetes/Makefile | 37 ++++++++++++++++++++----------------- 1 file changed, 20 insertions(+), 17 deletions(-) diff --git a/deploy/kubernetes/Makefile b/deploy/kubernetes/Makefile index 6d31e04e38e..295b238cef5 100644 --- a/deploy/kubernetes/Makefile +++ b/deploy/kubernetes/Makefile @@ -51,12 +51,9 @@ ci-clone-kibana-repository: ## ci-create-kubernetes-templates-pull-request : Create the pull request for the kubernetes templates .PHONY: ci-create-kubernetes-templates-pull-request ci-create-kubernetes-templates-pull-request: - HASDIFF=$$(git status | grep $(FILE_REPO) | wc -l); \ - if [ $${HASDIFF} -ne 1 ]; \ - then \ - echo "No differences found with kibana git repository" && \ - exit 1; \ - fi + HASDIFF=`git status | grep $(FILE_REPO) | wc -l`; \ + echo $${HASDIFF} +ifeq ($${HASDIFF},1) echo "INFO: Create branch to update k8s templates" git config user.name obscloudnativemonitoring git config user.email obs-cloudnative-monitoring@elastic.co @@ -67,18 +64,24 @@ ci-create-kubernetes-templates-pull-request: git diff --staged --quiet || git commit -m "[Automated PR] Publish kubernetes templates for elastic-agent" echo "INFO: show remote details" git remote -v + ifeq ($(DRY_RUN),TRUE) - echo "INFO: skip pushing branch" + echo "INFO: skip pushing branch" else - echo "INFO: push branch" - @git push --set-upstream origin $(ELASTIC_AGENT_BRANCH) - echo "INFO: create pull request" - @gh pr create \ - --title "Update kubernetes templates for elastic-agent" \ - --body "Automated by ${BUILD_URL}" \ - --label automation \ - --base main \ - --head $(ELASTIC_AGENT_BRANCH) \ - --reviewer elastic/obs-cloudnative-monitoring + echo "INFO: push branch" + @git push --set-upstream origin $(ELASTIC_AGENT_BRANCH) + echo "INFO: create pull request" + @gh pr create \ + --title "Update kubernetes templates for elastic-agent" \ + --body "Automated by ${BUILD_URL}" \ + --label automation \ + --label release_note:automation \ + --base main \ + --head $(ELASTIC_AGENT_BRANCH) \ + --reviewer elastic/obs-cloudnative-monitoring endif +else + echo "No differences found with kibana git repository" +endif + From 8b711f3923023827c32871b5dbed1c771bfa7e0c Mon Sep 17 00:00:00 2001 From: Chris Mark Date: Wed, 14 Sep 2022 13:57:26 +0300 Subject: [PATCH 123/180] Fix leftover log level (#1194) --- internal/pkg/composable/providers/kubernetes/pod.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/internal/pkg/composable/providers/kubernetes/pod.go b/internal/pkg/composable/providers/kubernetes/pod.go index 53814a182fa..d4553dda6d3 100644 --- a/internal/pkg/composable/providers/kubernetes/pod.go +++ 
b/internal/pkg/composable/providers/kubernetes/pod.go @@ -161,9 +161,9 @@ func (p *pod) emitRunning(pod *kubernetes.Pod) { annotations, _ := ann.(mapstr.M) hints := utils.GenerateHints(annotations, "", p.config.Prefix) if len(hints) > 0 { - p.logger.Errorf("Extracted hints are :%v", hints) + p.logger.Debugf("Extracted hints are :%v", hints) hintsMapping := GenerateHintsMapping(hints, data.mapping, p.logger, "") - p.logger.Errorf("Generated hints mappings are :%v", hintsMapping) + p.logger.Debugf("Generated hints mappings are :%v", hintsMapping) _ = p.comm.AddOrUpdate( data.uid, PodPriority, From ae2c942a8ce79261fbc00cfd902945018ddd7aca Mon Sep 17 00:00:00 2001 From: Elastic Machine Date: Wed, 14 Sep 2022 06:57:52 -0400 Subject: [PATCH 124/180] [automation] Publish kubernetes templates for elastic-agent (#1192) Co-authored-by: apmmachine --- .../templates.d/activemq.yml | 96 + .../templates.d/apache.yml | 134 + .../templates.d/cassandra.yml | 327 + .../templates.d/cef.yml | 51 + .../templates.d/checkpoint.yml | 62 + .../templates.d/cloud_security_posture.yml | 93 + .../templates.d/cockroachdb.yml | 44 + .../templates.d/crowdstrike.yml | 79 + .../templates.d/cyberarkpas.yml | 57 + .../templates.d/elasticsearch.yml | 288 + .../templates.d/endpoint.yml | 22 + .../templates.d/fireeye.yml | 59 + .../templates.d/haproxy.yml | 68 + .../templates.d/hashicorp_vault.yml | 73 + .../templates.d/hid_bravura_monitor.yml | 42 + .../templates.d/iis.yml | 71 + .../templates.d/infoblox_nios.yml | 51 + .../templates.d/iptables.yml | 54 + .../templates.d/kafka.yml | 61 + .../templates.d/keycloak.yml | 23 + .../templates.d/kibana.yml | 112 + .../templates.d/log.yml | 18 + .../templates.d/logstash.yml | 75 + .../templates.d/mattermost.yml | 22 + .../templates.d/microsoft_sqlserver.yml | 127 + .../templates.d/mimecast.yml | 381 + .../templates.d/modsecurity.yml | 28 + .../templates.d/mongodb.yml | 73 + .../templates.d/mysql.yml | 82 + .../templates.d/mysql_enterprise.yml | 18 + .../templates.d/nats.yml | 82 + .../templates.d/netflow.yml | 47 + .../templates.d/nginx.yml | 142 + .../templates.d/nginx_ingress_controller.yml | 53 + .../templates.d/oracle.yml | 82 + .../templates.d/osquery.yml | 23 + .../templates.d/osquery_manager.yml | 33 + .../templates.d/panw.yml | 94 + .../templates.d/panw_cortex_xdr.yml | 90 + .../templates.d/pfsense.yml | 62 + .../templates.d/postgresql.yml | 68 + .../templates.d/prometheus.yml | 90 + .../templates.d/qnap_nas.yml | 60 + .../templates.d/rabbitmq.yml | 79 + .../templates.d/redis.yml | 84 + .../templates.d/santa.yml | 23 + .../templates.d/security_detection_engine.yml | 22 + .../templates.d/sentinel_one.yml | 217 + .../templates.d/snort.yml | 53 + .../templates.d/snyk.yml | 139 + .../templates.d/stan.yml | 56 + .../templates.d/suricata.yml | 24 + .../templates.d/symantec_endpoint.yml | 67 + .../templates.d/synthetics.yml | 148 + .../templates.d/tcp.yml | 32 + .../templates.d/tomcat.yml | 8296 +++++++++++++++++ .../templates.d/traefik.yml | 37 + .../templates.d/udp.yml | 33 + .../templates.d/zeek.yml | 2271 +++++ .../templates.d/zookeeper.yml | 54 + 60 files changed, 15252 insertions(+) create mode 100644 deploy/kubernetes/elastic-agent-standalone/templates.d/activemq.yml create mode 100644 deploy/kubernetes/elastic-agent-standalone/templates.d/apache.yml create mode 100644 deploy/kubernetes/elastic-agent-standalone/templates.d/cassandra.yml create mode 100644 deploy/kubernetes/elastic-agent-standalone/templates.d/cef.yml create mode 100644 
deploy/kubernetes/elastic-agent-standalone/templates.d/checkpoint.yml create mode 100644 deploy/kubernetes/elastic-agent-standalone/templates.d/cloud_security_posture.yml create mode 100644 deploy/kubernetes/elastic-agent-standalone/templates.d/cockroachdb.yml create mode 100644 deploy/kubernetes/elastic-agent-standalone/templates.d/crowdstrike.yml create mode 100644 deploy/kubernetes/elastic-agent-standalone/templates.d/cyberarkpas.yml create mode 100644 deploy/kubernetes/elastic-agent-standalone/templates.d/elasticsearch.yml create mode 100644 deploy/kubernetes/elastic-agent-standalone/templates.d/endpoint.yml create mode 100644 deploy/kubernetes/elastic-agent-standalone/templates.d/fireeye.yml create mode 100644 deploy/kubernetes/elastic-agent-standalone/templates.d/haproxy.yml create mode 100644 deploy/kubernetes/elastic-agent-standalone/templates.d/hashicorp_vault.yml create mode 100644 deploy/kubernetes/elastic-agent-standalone/templates.d/hid_bravura_monitor.yml create mode 100644 deploy/kubernetes/elastic-agent-standalone/templates.d/iis.yml create mode 100644 deploy/kubernetes/elastic-agent-standalone/templates.d/infoblox_nios.yml create mode 100644 deploy/kubernetes/elastic-agent-standalone/templates.d/iptables.yml create mode 100644 deploy/kubernetes/elastic-agent-standalone/templates.d/kafka.yml create mode 100644 deploy/kubernetes/elastic-agent-standalone/templates.d/keycloak.yml create mode 100644 deploy/kubernetes/elastic-agent-standalone/templates.d/kibana.yml create mode 100644 deploy/kubernetes/elastic-agent-standalone/templates.d/log.yml create mode 100644 deploy/kubernetes/elastic-agent-standalone/templates.d/logstash.yml create mode 100644 deploy/kubernetes/elastic-agent-standalone/templates.d/mattermost.yml create mode 100644 deploy/kubernetes/elastic-agent-standalone/templates.d/microsoft_sqlserver.yml create mode 100644 deploy/kubernetes/elastic-agent-standalone/templates.d/mimecast.yml create mode 100644 deploy/kubernetes/elastic-agent-standalone/templates.d/modsecurity.yml create mode 100644 deploy/kubernetes/elastic-agent-standalone/templates.d/mongodb.yml create mode 100644 deploy/kubernetes/elastic-agent-standalone/templates.d/mysql.yml create mode 100644 deploy/kubernetes/elastic-agent-standalone/templates.d/mysql_enterprise.yml create mode 100644 deploy/kubernetes/elastic-agent-standalone/templates.d/nats.yml create mode 100644 deploy/kubernetes/elastic-agent-standalone/templates.d/netflow.yml create mode 100644 deploy/kubernetes/elastic-agent-standalone/templates.d/nginx.yml create mode 100644 deploy/kubernetes/elastic-agent-standalone/templates.d/nginx_ingress_controller.yml create mode 100644 deploy/kubernetes/elastic-agent-standalone/templates.d/oracle.yml create mode 100644 deploy/kubernetes/elastic-agent-standalone/templates.d/osquery.yml create mode 100644 deploy/kubernetes/elastic-agent-standalone/templates.d/osquery_manager.yml create mode 100644 deploy/kubernetes/elastic-agent-standalone/templates.d/panw.yml create mode 100644 deploy/kubernetes/elastic-agent-standalone/templates.d/panw_cortex_xdr.yml create mode 100644 deploy/kubernetes/elastic-agent-standalone/templates.d/pfsense.yml create mode 100644 deploy/kubernetes/elastic-agent-standalone/templates.d/postgresql.yml create mode 100644 deploy/kubernetes/elastic-agent-standalone/templates.d/prometheus.yml create mode 100644 deploy/kubernetes/elastic-agent-standalone/templates.d/qnap_nas.yml create mode 100644 deploy/kubernetes/elastic-agent-standalone/templates.d/rabbitmq.yml create mode 100644 
deploy/kubernetes/elastic-agent-standalone/templates.d/redis.yml create mode 100644 deploy/kubernetes/elastic-agent-standalone/templates.d/santa.yml create mode 100644 deploy/kubernetes/elastic-agent-standalone/templates.d/security_detection_engine.yml create mode 100644 deploy/kubernetes/elastic-agent-standalone/templates.d/sentinel_one.yml create mode 100644 deploy/kubernetes/elastic-agent-standalone/templates.d/snort.yml create mode 100644 deploy/kubernetes/elastic-agent-standalone/templates.d/snyk.yml create mode 100644 deploy/kubernetes/elastic-agent-standalone/templates.d/stan.yml create mode 100644 deploy/kubernetes/elastic-agent-standalone/templates.d/suricata.yml create mode 100644 deploy/kubernetes/elastic-agent-standalone/templates.d/symantec_endpoint.yml create mode 100644 deploy/kubernetes/elastic-agent-standalone/templates.d/synthetics.yml create mode 100644 deploy/kubernetes/elastic-agent-standalone/templates.d/tcp.yml create mode 100644 deploy/kubernetes/elastic-agent-standalone/templates.d/tomcat.yml create mode 100644 deploy/kubernetes/elastic-agent-standalone/templates.d/traefik.yml create mode 100644 deploy/kubernetes/elastic-agent-standalone/templates.d/udp.yml create mode 100644 deploy/kubernetes/elastic-agent-standalone/templates.d/zeek.yml create mode 100644 deploy/kubernetes/elastic-agent-standalone/templates.d/zookeeper.yml diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/activemq.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/activemq.yml new file mode 100644 index 00000000000..8177cd731d2 --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/activemq.yml @@ -0,0 +1,96 @@ +inputs: + - name: activemq/metrics-activemq + type: activemq/metrics + use_output: default + streams: + - condition: ${kubernetes.hints.activemq.broker.enabled} == true or ${kubernetes.hints.activemq.enabled} == true + data_stream: + dataset: activemq.broker + type: metrics + hosts: + - ${kubernetes.hints.activemq.broker.host|'localhost:8161'} + metricsets: + - broker + password: ${kubernetes.hints.activemq.broker.password|'admin'} + path: /api/jolokia/?ignoreErrors=true&canonicalNaming=false + period: ${kubernetes.hints.activemq.broker.period|'10s'} + tags: + - forwarded + - activemq-broker + username: ${kubernetes.hints.activemq.broker.username|'admin'} + - condition: ${kubernetes.hints.activemq.queue.enabled} == true or ${kubernetes.hints.activemq.enabled} == true + data_stream: + dataset: activemq.queue + type: metrics + hosts: + - ${kubernetes.hints.activemq.queue.host|'localhost:8161'} + metricsets: + - queue + password: ${kubernetes.hints.activemq.queue.password|'admin'} + path: /api/jolokia/?ignoreErrors=true&canonicalNaming=false + period: ${kubernetes.hints.activemq.queue.period|'10s'} + tags: + - forwarded + - activemq-queue + username: ${kubernetes.hints.activemq.queue.username|'admin'} + - condition: ${kubernetes.hints.activemq.topic.enabled} == true or ${kubernetes.hints.activemq.enabled} == true + data_stream: + dataset: activemq.topic + type: metrics + hosts: + - ${kubernetes.hints.activemq.topic.host|'localhost:8161'} + metricsets: + - topic + password: ${kubernetes.hints.activemq.topic.password|'admin'} + path: /api/jolokia/?ignoreErrors=true&canonicalNaming=false + period: ${kubernetes.hints.activemq.topic.period|'10s'} + tags: + - forwarded + - activemq-topic + username: ${kubernetes.hints.activemq.topic.username|'admin'} + data_stream.namespace: default + - name: filestream-activemq + type: filestream + use_output: 
default + streams: + - condition: ${kubernetes.hints.activemq.audit.enabled} == true or ${kubernetes.hints.activemq.enabled} == true + data_stream: + dataset: activemq.audit + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.activemq.audit.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - activemq-audit + - condition: ${kubernetes.hints.activemq.log.enabled} == true or ${kubernetes.hints.activemq.enabled} == true + data_stream: + dataset: activemq.log + type: logs + exclude_files: + - .gz$ + multiline: + match: after + negate: true + pattern: '^\d{4}-\d{2}-\d{2} ' + parsers: + - container: + format: auto + stream: ${kubernetes.hints.activemq.log.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - activemq-log + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/apache.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/apache.yml new file mode 100644 index 00000000000..bdf487d2d5c --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/apache.yml @@ -0,0 +1,134 @@ +inputs: + - name: apache/metrics-apache + type: apache/metrics + use_output: default + streams: + - condition: ${kubernetes.hints.apache.status.enabled} == true or ${kubernetes.hints.apache.enabled} == true + data_stream: + dataset: apache.status + type: metrics + hosts: + - ${kubernetes.hints.apache.status.host|'http://127.0.0.1'} + metricsets: + - status + period: ${kubernetes.hints.apache.status.period|'30s'} + server_status_path: /server-status + data_stream.namespace: default + - name: filestream-apache + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.apache.access.enabled} == true or ${kubernetes.hints.apache.enabled} == true + data_stream: + dataset: apache.access + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.apache.access.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + prospector: + scanner: + symlinks: true + tags: + - apache-access + - condition: ${kubernetes.hints.apache.error.enabled} == true or ${kubernetes.hints.apache.enabled} == true + data_stream: + dataset: apache.error + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.apache.error.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + processors: + - add_locale: null + prospector: + scanner: + symlinks: true + tags: + - apache-error + data_stream.namespace: default + - name: httpjson-apache + type: httpjson + use_output: default + streams: + - condition: ${kubernetes.hints.apache.access.enabled} == true and ${kubernetes.hints.apache.enabled} == true + config_version: "2" + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: apache.access + type: logs + interval: 10s + request.method: POST + request.transforms: + - set: + target: url.params.search + value: search sourcetype="access*" | streamstats max(_indextime) AS max_indextime + - set: + target: url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: 
url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: https://server.example.com:8089/services/search/jobs/export + response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - apache-access + - condition: ${kubernetes.hints.apache.error.enabled} == true and ${kubernetes.hints.apache.enabled} == true + config_version: 2 + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: apache.error + type: logs + interval: 10s + request.method: POST + request.transforms: + - set: + target: url.params.search + value: search sourcetype=apache:error OR sourcetype=apache_error | streamstats max(_indextime) AS max_indextime + - set: + target: url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: https://server.example.com:8089/services/search/jobs/export + response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - apache-error + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/cassandra.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/cassandra.yml new file mode 100644 index 00000000000..296b330c807 --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/cassandra.yml @@ -0,0 +1,327 @@ +inputs: + - name: jolokia/metrics-cassandra + type: jolokia/metrics + use_output: default + streams: + - condition: ${kubernetes.hints.cassandra.metrics.enabled} == true or ${kubernetes.hints.cassandra.enabled} == true + data_stream: + dataset: cassandra.metrics + type: metrics + hosts: + - ${kubernetes.hints.cassandra.metrics.host|'localhost:8778'} + jmx.mappings: + - attributes: + - attr: ReleaseVersion + field: system.version + - attr: ClusterName + field: system.cluster + - attr: LiveNodes + field: system.live_nodes + - attr: UnreachableNodes + field: system.unreachable_nodes + - attr: LeavingNodes + field: system.leaving_nodes + - attr: JoiningNodes + field: system.joining_nodes + - attr: MovingNodes + field: system.moving_nodes + mbean: org.apache.cassandra.db:type=StorageService + - attributes: + - attr: Datacenter + field: system.data_center + - attr: Rack + field: system.rack + mbean: org.apache.cassandra.db:type=EndpointSnitchInfo + - attributes: + - attr: Count + field: storage.total_hint_in_progress + mbean: org.apache.cassandra.metrics:name=TotalHintsInProgress,type=Storage + - attributes: + - attr: Count + field: storage.total_hints + mbean: org.apache.cassandra.metrics:name=TotalHints,type=Storage + - attributes: + - attr: Count + field: storage.exceptions + mbean: org.apache.cassandra.metrics:name=Exceptions,type=Storage + - attributes: + - attr: Count + field: storage.load + mbean: org.apache.cassandra.metrics:name=Load,type=Storage + - attributes: + - attr: OneMinuteRate + field: hits.succeeded_per_second + mbean: org.apache.cassandra.metrics:type=HintsService,name=HintsSucceeded + - attributes: + - attr: OneMinuteRate + field: hits.failed_per_second + mbean: org.apache.cassandra.metrics:type=HintsService,name=HintsFailed + - attributes: + - 
attr: OneMinuteRate + field: hits.timed_out_per_second + mbean: org.apache.cassandra.metrics:type=HintsService,name=HintsTimedOut + - attributes: + - attr: CollectionTime + field: gc.concurrent_mark_sweep.collection_time + - attr: CollectionCount + field: gc.concurrent_mark_sweep.collection_count + mbean: java.lang:type=GarbageCollector,name=ConcurrentMarkSweep + - attributes: + - attr: CollectionTime + field: gc.par_new.collection_time + - attr: CollectionCount + field: gc.par_new.collection_count + mbean: java.lang:type=GarbageCollector,name=ParNew + - attributes: + - attr: HeapMemoryUsage + field: memory.heap_usage + - attr: NonHeapMemoryUsage + field: memory.other_usage + mbean: java.lang:type=Memory + - attributes: + - attr: Value + field: task.complete + mbean: org.apache.cassandra.metrics:name=CompletedTasks,type=CommitLog + - attributes: + - attr: Value + field: task.pending + mbean: org.apache.cassandra.metrics:name=PendingTasks,type=CommitLog + - attributes: + - attr: Value + field: task.total_commitlog_size + mbean: org.apache.cassandra.metrics:name=TotalCommitLogSize,type=CommitLog + - attributes: + - attr: Count + field: client_request.write.timeouts + - attr: OneMinuteRate + field: client_request.write.timeoutsms + mbean: org.apache.cassandra.metrics:type=ClientRequest,name=Timeouts,scope=Write + - attributes: + - attr: Count + field: client_request.write.unavailables + - attr: OneMinuteRate + field: client_request.write.unavailablesms + mbean: org.apache.cassandra.metrics:type=ClientRequest,name=Unavailables,scope=Write + - attributes: + - attr: Count + field: client_request.write.count + - attr: OneMinuteRate + field: client_request.write.one_minute_rate + mbean: org.apache.cassandra.metrics:type=ClientRequest,name=Latency,scope=Write + - attributes: + - attr: Count + field: client_request.write.total_latency + mbean: org.apache.cassandra.metrics:type=ClientRequest,name=TotalLatency,scope=Write + - attributes: + - attr: Count + field: client_request.read.timeouts + - attr: OneMinuteRate + field: client_request.read.timeoutsms + mbean: org.apache.cassandra.metrics:type=ClientRequest,name=Timeouts,scope=Read + - attributes: + - attr: Count + field: client_request.read.unavailables + - attr: OneMinuteRate + field: client_request.read.unavailablesms + mbean: org.apache.cassandra.metrics:type=ClientRequest,name=Unavailables,scope=Read + - attributes: + - attr: Count + field: client_request.read.count + - attr: OneMinuteRate + field: client_request.read.one_minute_rate + mbean: org.apache.cassandra.metrics:type=ClientRequest,name=Latency,scope=Read + - attributes: + - attr: Count + field: client_request.read.total_latency + mbean: org.apache.cassandra.metrics:type=ClientRequest,name=TotalLatency,scope=Read + - attributes: + - attr: OneMinuteRate + field: client_request.range_slice.one_minute_rate + mbean: org.apache.cassandra.metrics:type=ClientRequest,name=Latency,scope=RangeSlice + - attributes: + - attr: Count + field: client_request.range_slice.total_latency + mbean: org.apache.cassandra.metrics:type=ClientRequest,name=TotalLatency,scope=RangeSlice + - attributes: + - attr: OneMinuteRate + field: client_request.caswrite.one_minute_rate + mbean: org.apache.cassandra.metrics:type=ClientRequest,name=Latency,scope=CASWrite + - attributes: + - attr: OneMinuteRate + field: client_request.casread.one_minute_rate + mbean: org.apache.cassandra.metrics:type=ClientRequest,name=Latency,scope=CASRead + - attributes: + - attr: Value + field: client.connected_native_clients + mbean: 
org.apache.cassandra.metrics:type=Client,name=connectedNativeClients + - attributes: + - attr: Value + field: compaction.completed + mbean: org.apache.cassandra.metrics:name=CompletedTasks,type=Compaction + - attributes: + - attr: Value + field: compaction.pending + mbean: org.apache.cassandra.metrics:name=PendingTasks,type=Compaction + - attributes: + - attr: Value + field: table.live_ss_table_count + mbean: org.apache.cassandra.metrics:type=Table,name=LiveSSTableCount + - attributes: + - attr: Value + field: table.live_disk_space_used + mbean: org.apache.cassandra.metrics:type=Table,name=LiveDiskSpaceUsed + - attributes: + - attr: Value + field: table.all_memtables_heap_size + mbean: org.apache.cassandra.metrics:type=Table,name=AllMemtablesHeapSize + - attributes: + - attr: Value + field: table.all_memtables_off_heap_size + mbean: org.apache.cassandra.metrics:type=Table,name=AllMemtablesOffHeapSize + - attributes: + - attr: OneMinuteRate + field: cache.key_cache.requests.one_minute_rate + mbean: org.apache.cassandra.metrics:type=Cache,scope=KeyCache,name=Requests + - attributes: + - attr: Value + field: cache.key_cache.capacity + mbean: org.apache.cassandra.metrics:type=Cache,scope=KeyCache,name=Capacity + - attributes: + - attr: Value + field: cache.key_cache.one_minute_hit_rate + mbean: org.apache.cassandra.metrics:type=Cache,scope=KeyCache,name=OneMinuteHitRate + - attributes: + - attr: OneMinuteRate + field: cache.row_cache.requests.one_minute_rate + mbean: org.apache.cassandra.metrics:type=Cache,scope=RowCache,name=Requests + - attributes: + - attr: Value + field: cache.row_cache.capacity + mbean: org.apache.cassandra.metrics:type=Cache,scope=RowCache,name=Capacity + - attributes: + - attr: Value + field: cache.row_cache.one_minute_hit_rate + mbean: org.apache.cassandra.metrics:type=Cache,scope=RowCache,name=OneMinuteHitRate + - attributes: + - attr: Value + field: thread_pools.counter_mutation_stage.request.active + mbean: org.apache.cassandra.metrics:name=ActiveTasks,path=request,scope=CounterMutationStage,type=ThreadPools + - attributes: + - attr: Value + field: thread_pools.counter_mutation_stage.request.pending + mbean: org.apache.cassandra.metrics:name=PendingTasks,path=request,scope=CounterMutationStage,type=ThreadPools + - attributes: + - attr: Value + field: thread_pools.mutation_stage.request.active + mbean: org.apache.cassandra.metrics:name=ActiveTasks,path=request,scope=MutationStage,type=ThreadPools + - attributes: + - attr: Value + field: thread_pools.mutation_stage.request.pending + mbean: org.apache.cassandra.metrics:name=PendingTasks,path=request,scope=MutationStage,type=ThreadPools + - attributes: + - attr: Value + field: thread_pools.read_repair_stage.request.active + mbean: org.apache.cassandra.metrics:name=ActiveTasks,path=request,scope=ReadRepairStage,type=ThreadPools + - attributes: + - attr: Value + field: thread_pools.read_repair_stage.request.pending + mbean: org.apache.cassandra.metrics:name=PendingTasks,path=request,scope=ReadRepairStage,type=ThreadPools + - attributes: + - attr: Value + field: thread_pools.read_stage.request.active + mbean: org.apache.cassandra.metrics:name=ActiveTasks,path=request,scope=ReadStage,type=ThreadPools + - attributes: + - attr: Value + field: thread_pools.read_stage.request.pending + mbean: org.apache.cassandra.metrics:name=PendingTasks,path=request,scope=ReadStage,type=ThreadPools + - attributes: + - attr: Value + field: thread_pools.request_response_stage.request.active + mbean: 
org.apache.cassandra.metrics:name=ActiveTasks,path=request,scope=RequestResponseStage,type=ThreadPools + - attributes: + - attr: Value + field: thread_pools.request_response_stage.request.pending + mbean: org.apache.cassandra.metrics:name=PendingTasks,path=request,scope=RequestResponseStage,type=ThreadPools + - attributes: + - attr: Value + field: column_family.total_disk_space_used + mbean: org.apache.cassandra.metrics:name=TotalDiskSpaceUsed,type=ColumnFamily + - attributes: + - attr: Count + field: dropped_message.batch_remove + mbean: org.apache.cassandra.metrics:type=DroppedMessage,scope=BATCH_REMOVE,name=Dropped + - attributes: + - attr: Count + field: dropped_message.batch_store + mbean: org.apache.cassandra.metrics:type=DroppedMessage,scope=BATCH_STORE,name=Dropped + - attributes: + - attr: Count + field: dropped_message.counter_mutation + mbean: org.apache.cassandra.metrics:type=DroppedMessage,scope=COUNTER_MUTATION,name=Dropped + - attributes: + - attr: Count + field: dropped_message.hint + mbean: org.apache.cassandra.metrics:type=DroppedMessage,scope=HINT,name=Dropped + - attributes: + - attr: Count + field: dropped_message.mutation + mbean: org.apache.cassandra.metrics:type=DroppedMessage,scope=MUTATION,name=Dropped + - attributes: + - attr: Count + field: dropped_message.paged_range + mbean: org.apache.cassandra.metrics:type=DroppedMessage,scope=PAGED_RANGE,name=Dropped + - attributes: + - attr: Count + field: dropped_message.range_slice + mbean: org.apache.cassandra.metrics:type=DroppedMessage,scope=RANGE_SLICE,name=Dropped + - attributes: + - attr: Count + field: dropped_message.read + mbean: org.apache.cassandra.metrics:type=DroppedMessage,scope=READ,name=Dropped + - attributes: + - attr: Count + field: dropped_message.read_repair + mbean: org.apache.cassandra.metrics:type=DroppedMessage,scope=READ_REPAIR,name=Dropped + - attributes: + - attr: Count + field: dropped_message.request_response + mbean: org.apache.cassandra.metrics:type=DroppedMessage,scope=REQUEST_RESPONSE,name=Dropped + - attributes: + - attr: Count + field: dropped_message.trace + mbean: org.apache.cassandra.metrics:type=DroppedMessage,scope=_TRACE,name=Dropped + metricsets: + - jmx + namespace: metrics + password: ${kubernetes.hints.cassandra.metrics.password|'admin'} + path: /jolokia/?ignoreErrors=true&canonicalNaming=false + period: ${kubernetes.hints.cassandra.metrics.period|'10s'} + username: ${kubernetes.hints.cassandra.metrics.username|'admin'} + data_stream.namespace: default + - name: filestream-cassandra + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.cassandra.log.enabled} == true or ${kubernetes.hints.cassandra.enabled} == true + data_stream: + dataset: cassandra.log + type: logs + exclude_files: + - .gz$ + multiline: + match: after + negate: true + pattern: ^([A-Z]) + parsers: + - container: + format: auto + stream: ${kubernetes.hints.cassandra.log.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - cassandra-systemlogs + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/cef.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/cef.yml new file mode 100644 index 00000000000..524cb6159f3 --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/cef.yml @@ -0,0 +1,51 @@ +inputs: + - name: filestream-cef + type: filestream + use_output: default + streams: + - condition: 
${kubernetes.hints.cef.log.enabled} == true or ${kubernetes.hints.cef.enabled} == true + data_stream: + dataset: cef.log + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.cef.log.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + processors: + - rename: + fields: + - from: message + to: event.original + - decode_cef: + field: event.original + prospector: + scanner: + symlinks: true + tags: + - cef + - forwarded + data_stream.namespace: default + - name: udp-cef + type: udp + use_output: default + streams: + - condition: ${kubernetes.hints.cef.log.enabled} == true or ${kubernetes.hints.cef.enabled} == true + data_stream: + dataset: cef.log + type: logs + host: localhost:9003 + processors: + - rename: + fields: + - from: message + to: event.original + - decode_cef: + field: event.original + tags: + - cef + - forwarded + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/checkpoint.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/checkpoint.yml new file mode 100644 index 00000000000..c8d49475fb3 --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/checkpoint.yml @@ -0,0 +1,62 @@ +inputs: + - name: filestream-checkpoint + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.checkpoint.firewall.enabled} == true or ${kubernetes.hints.checkpoint.enabled} == true + data_stream: + dataset: checkpoint.firewall + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.checkpoint.firewall.stream|'all'} + paths: null + processors: + - add_locale: null + - add_fields: + fields: + internal_zones: + - trust + target: _temp_ + - add_fields: + fields: + external_zones: + - untrust + target: _temp_ + prospector: + scanner: + symlinks: true + tags: + - forwarded + data_stream.namespace: default + - name: tcp-checkpoint + type: tcp + use_output: default + streams: + - condition: ${kubernetes.hints.checkpoint.firewall.enabled} == true or ${kubernetes.hints.checkpoint.enabled} == true + data_stream: + dataset: checkpoint.firewall + type: logs + host: localhost:9001 + processors: + - add_locale: null + tags: + - forwarded + data_stream.namespace: default + - name: udp-checkpoint + type: udp + use_output: default + streams: + - condition: ${kubernetes.hints.checkpoint.firewall.enabled} == true or ${kubernetes.hints.checkpoint.enabled} == true + data_stream: + dataset: checkpoint.firewall + type: logs + host: localhost:9001 + processors: + - add_locale: null + tags: + - forwarded + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/cloud_security_posture.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/cloud_security_posture.yml new file mode 100644 index 00000000000..bbc867294c7 --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/cloud_security_posture.yml @@ -0,0 +1,93 @@ +inputs: + - name: cloudbeat/cis_k8s-cloud_security_posture + type: cloudbeat/cis_k8s + use_output: default + streams: + - condition: ${kubernetes.hints.cloud_security_posture.findings.enabled} == true or ${kubernetes.hints.cloud_security_posture.enabled} == true + data_stream: + dataset: cloud_security_posture.findings + type: logs + evaluator: + decision_logs: false + fetchers: + - name: kube-api + - directory: /hostfs + name: process + processes: + etcd: null + kube-apiserver: null + kube-controller: null + 
kube-scheduler: null + kubelet: + config-file-arguments: + - config + - name: file-system + patterns: + - /hostfs/etc/kubernetes/scheduler.conf + - /hostfs/etc/kubernetes/controller-manager.conf + - /hostfs/etc/kubernetes/admin.conf + - /hostfs/etc/kubernetes/kubelet.conf + - /hostfs/etc/kubernetes/manifests/etcd.yaml + - /hostfs/etc/kubernetes/manifests/kube-apiserver.yaml + - /hostfs/etc/kubernetes/manifests/kube-controller-manager.yaml + - /hostfs/etc/kubernetes/manifests/kube-scheduler.yaml + - /hostfs/etc/systemd/system/kubelet.service.d/10-kubeadm.conf + - /hostfs/etc/kubernetes/pki/* + - /hostfs/var/lib/kubelet/config.yaml + - /hostfs/var/lib/etcd + - /hostfs/etc/kubernetes/pki + name: Findings + period: 4h + processors: + - add_cluster_id: null + data_stream.namespace: default + - name: cloudbeat/cis_eks-cloud_security_posture + type: cloudbeat/cis_eks + use_output: default + streams: + - condition: ${kubernetes.hints.cloud_security_posture.findings.enabled} == true and ${kubernetes.hints.cloud_security_posture.enabled} == true + data_stream: + dataset: cloud_security_posture.findings + type: logs + evaluator: + decision_logs: false + fetchers: + - name: kube-api + - directory: /hostfs + name: process + processes: + kubelet: + config-file-arguments: + - config + - name: aws-ecr + - name: aws-elb + - name: file-system + patterns: + - /hostfs/etc/kubernetes/kubelet/kubelet-config.json + - /hostfs/var/lib/kubelet/kubeconfig + name: Findings + period: 4h + processors: + - add_cluster_id: null + data_stream.namespace: default + - name: filestream-cloud_security_posture + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.cloud_security_posture.container_logs.enabled} == true + data_stream: + dataset: kubernetes.container_logs + type: logs + exclude_files: [] + exclude_lines: [] + parsers: + - container: + format: auto + stream: all + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + prospector: + scanner: + symlinks: true + tags: [] + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/cockroachdb.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/cockroachdb.yml new file mode 100644 index 00000000000..531706b7345 --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/cockroachdb.yml @@ -0,0 +1,44 @@ +inputs: + - name: filestream-cockroachdb + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.cockroachdb.container_logs.enabled} == true + data_stream: + dataset: kubernetes.container_logs + type: logs + exclude_files: [] + exclude_lines: [] + parsers: + - container: + format: auto + stream: all + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + prospector: + scanner: + symlinks: true + tags: [] + data_stream.namespace: default + - name: prometheus/metrics-cockroachdb + type: prometheus/metrics + use_output: default + streams: + - bearer_token_file: null + condition: ${kubernetes.hints.cockroachdb.status.enabled} == true or ${kubernetes.hints.cockroachdb.enabled} == true + data_stream: + dataset: cockroachdb.status + type: metrics + hosts: + - ${kubernetes.hints.cockroachdb.status.host|'localhost:8080'} + metrics_filters.exclude: null + metrics_filters.include: null + metrics_path: /_status/vars + metricsets: + - collector + password: null + period: ${kubernetes.hints.cockroachdb.status.period|'10s'} + ssl.certificate_authorities: null + use_types: true + username: null + data_stream.namespace: default 
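All of the generated templates above follow the same hints contract: a stream is rendered only when `${kubernetes.hints.<package>.<data_stream>.enabled} == true` (or the package-wide `${kubernetes.hints.<package>.enabled} == true`) resolves from the discovered pod, and per-stream settings such as hosts, period, username and password fall back to literals through the `${hint|'default'}` syntax used throughout these files. As a minimal sketch of how a workload opts in, assuming the `co.elastic.hints/` annotation prefix used by the agent's hints-based autodiscovery (the annotations themselves are not part of this patch, so treat the names below as illustrative):

    # Hypothetical pod annotations; the co.elastic.hints/ names are an
    # assumption based on hints autodiscovery, not taken from this patch.
    apiVersion: v1
    kind: Pod
    metadata:
      name: cockroachdb-0
      annotations:
        co.elastic.hints/package: cockroachdb                 # selects templates.d/cockroachdb.yml
        co.elastic.hints/host: '${kubernetes.pod.ip}:8080'    # feeds kubernetes.hints.cockroachdb.status.host
        co.elastic.hints/period: 10s                          # feeds kubernetes.hints.cockroachdb.status.period
    spec:
      containers:
        - name: cockroachdb
          image: cockroachdb/cockroach

With these annotations the cockroachdb template above would enable its `prometheus/metrics` stream against the pod's IP on port 8080, while unannotated settings keep the `|'default'` fallbacks shown in the template.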
diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/crowdstrike.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/crowdstrike.yml new file mode 100644 index 00000000000..95a2730c18b --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/crowdstrike.yml @@ -0,0 +1,79 @@ +inputs: + - name: filestream-crowdstrike + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.crowdstrike.falcon.enabled} == true or ${kubernetes.hints.crowdstrike.enabled} == true + data_stream: + dataset: crowdstrike.falcon + type: logs + exclude_files: + - .gz$ + multiline.match: after + multiline.max_lines: 5000 + multiline.negate: true + multiline.pattern: ^{ + multiline.timeout: 10 + parsers: + - container: + format: auto + stream: ${kubernetes.hints.crowdstrike.falcon.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - crowdstrike-falcon + - condition: ${kubernetes.hints.crowdstrike.fdr.enabled} == true or ${kubernetes.hints.crowdstrike.enabled} == true + data_stream: + dataset: crowdstrike.fdr + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.crowdstrike.fdr.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + processors: + - add_locale: null + prospector: + scanner: + symlinks: true + tags: + - forwarded + - crowdstrike-fdr + data_stream.namespace: default + - name: aws-s3-crowdstrike + type: aws-s3 + use_output: default + streams: + - condition: ${kubernetes.hints.crowdstrike.fdr.enabled} == true or ${kubernetes.hints.crowdstrike.enabled} == true + data_stream: + dataset: crowdstrike.fdr + type: logs + queue_url: null + sqs.notification_parsing_script.source: | + function parse(n) { + var m = JSON.parse(n); + var evts = []; + var files = m.files; + var bucket = m.bucket; + if (!Array.isArray(files) || (files.length == 0) || bucket == null || bucket == "") { + return evts; + } + files.forEach(function(f){ + var evt = new S3EventV2(); + evt.SetS3BucketName(bucket); + evt.SetS3ObjectKey(f.path); + evts.push(evt); + }); + return evts; + } + tags: + - forwarded + - crowdstrike-fdr + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/cyberarkpas.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/cyberarkpas.yml new file mode 100644 index 00000000000..4dc9361aa41 --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/cyberarkpas.yml @@ -0,0 +1,57 @@ +inputs: + - name: filestream-cyberarkpas + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.cyberarkpas.audit.enabled} == true and ${kubernetes.hints.cyberarkpas.enabled} == true + data_stream: + dataset: cyberarkpas.audit + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.cyberarkpas.audit.stream|'all'} + paths: null + processors: + - add_locale: null + prospector: + scanner: + symlinks: true + tags: + - forwarded + - cyberarkpas-audit + data_stream.namespace: default + - name: tcp-cyberarkpas + type: tcp + use_output: default + streams: + - condition: ${kubernetes.hints.cyberarkpas.audit.enabled} == true or ${kubernetes.hints.cyberarkpas.enabled} == true + data_stream: + dataset: cyberarkpas.audit + type: logs + host: localhost:9301 + processors: + - add_locale: null + tags: + - cyberarkpas-audit + - forwarded + tcp: null + 
data_stream.namespace: default + - name: udp-cyberarkpas + type: udp + use_output: default + streams: + - condition: ${kubernetes.hints.cyberarkpas.audit.enabled} == true or ${kubernetes.hints.cyberarkpas.enabled} == true + data_stream: + dataset: cyberarkpas.audit + type: logs + host: localhost:9301 + processors: + - add_locale: null + tags: + - cyberarkpas-audit + - forwarded + udp: null + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/elasticsearch.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/elasticsearch.yml new file mode 100644 index 00000000000..49503b63346 --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/elasticsearch.yml @@ -0,0 +1,288 @@ +inputs: + - name: filestream-elasticsearch + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.elasticsearch.audit.enabled} == true or ${kubernetes.hints.elasticsearch.enabled} == true + data_stream: + dataset: elasticsearch.audit + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.elasticsearch.audit.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + processors: + - add_locale: null + - add_fields: + fields: + ecs.version: 1.10.0 + target: "" + - else: + - script: + id: elasticsearch_audit + lang: javascript + source: | + var requestRegex = new RegExp("request_body=\\\[(.*)\\\]$"); function process(event) { + var message = event.Get("message"); + if (message !== null) { + var matches = message.match(requestRegex); + if (matches && matches.length > 1) { + event.Put("_request", matches[1]); + } + } + } + if: + regexp: + message: ^{ + then: + - decode_json_fields: + fields: + - message + target: _json + - rename: + fields: + - from: _json.request.body + to: _request + ignore_missing: true + - drop_fields: + fields: + - _json + - detect_mime_type: + field: _request + target: http.request.mime_type + - drop_fields: + fields: + - _request + ignore_missing: true + prospector: + scanner: + symlinks: true + - condition: ${kubernetes.hints.elasticsearch.deprecation.enabled} == true or ${kubernetes.hints.elasticsearch.enabled} == true + data_stream: + dataset: elasticsearch.deprecation + type: logs + exclude_files: + - .gz$ + - _slowlog.log$ + - _access.log$ + multiline: + match: after + negate: true + pattern: ^(\[[0-9]{4}-[0-9]{2}-[0-9]{2}|{) + parsers: + - container: + format: auto + stream: ${kubernetes.hints.elasticsearch.deprecation.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + processors: + - add_locale.when.not.regexp.message: ^{ + - add_fields: + fields: + ecs.version: 1.10.0 + target: "" + prospector: + scanner: + symlinks: true + - condition: ${kubernetes.hints.elasticsearch.gc.enabled} == true or ${kubernetes.hints.elasticsearch.enabled} == true + data_stream: + dataset: elasticsearch.gc + type: logs + exclude_files: + - .gz$ + exclude_lines: + - '^(OpenJDK|Java HotSpot).* Server VM ' + - '^CommandLine flags: ' + - '^Memory: ' + - ^{ + multiline: + match: after + negate: true + pattern: ^(\[?[0-9]{4}-[0-9]{2}-[0-9]{2}|{) + parsers: + - container: + format: auto + stream: ${kubernetes.hints.elasticsearch.gc.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + processors: + - add_fields: + fields: + ecs.version: 1.10.0 + target: "" + prospector: + scanner: + symlinks: true + - condition: ${kubernetes.hints.elasticsearch.server.enabled} == true or 
${kubernetes.hints.elasticsearch.enabled} == true + data_stream: + dataset: elasticsearch.server + type: logs + exclude_files: + - .gz$ + - _slowlog.log$ + - _access.log$ + - _deprecation.log$ + multiline: + match: after + negate: true + pattern: ^(\[[0-9]{4}-[0-9]{2}-[0-9]{2}|{) + parsers: + - container: + format: auto + stream: ${kubernetes.hints.elasticsearch.server.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + processors: + - add_locale.when.not.regexp.message: ^{ + - add_fields: + fields: + ecs.version: 1.10.0 + target: "" + prospector: + scanner: + symlinks: true + - condition: ${kubernetes.hints.elasticsearch.slowlog.enabled} == true or ${kubernetes.hints.elasticsearch.enabled} == true + data_stream: + dataset: elasticsearch.slowlog + type: logs + exclude_files: + - .gz$ + multiline: + match: after + negate: true + pattern: ^(\[?[0-9]{4}-[0-9]{2}-[0-9]{2}|{) + parsers: + - container: + format: auto + stream: ${kubernetes.hints.elasticsearch.slowlog.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + processors: + - add_locale.when.not.regexp.message: ^{ + - add_fields: + fields: + ecs.version: 1.10.0 + target: "" + prospector: + scanner: + symlinks: true + data_stream.namespace: default + - name: elasticsearch/metrics-elasticsearch + type: elasticsearch/metrics + use_output: default + streams: + - condition: ${kubernetes.hints.elasticsearch.ccr.enabled} == true or ${kubernetes.hints.elasticsearch.enabled} == true + data_stream: + dataset: elasticsearch.stack_monitoring.ccr + type: metrics + hosts: + - ${kubernetes.hints.elasticsearch.ccr.host|'http://localhost:9200'} + metricsets: + - ccr + period: null + scope: node + - condition: ${kubernetes.hints.elasticsearch.cluster_stats.enabled} == true or ${kubernetes.hints.elasticsearch.enabled} == true + data_stream: + dataset: elasticsearch.stack_monitoring.cluster_stats + type: metrics + hosts: + - ${kubernetes.hints.elasticsearch.cluster_stats.host|'http://localhost:9200'} + metricsets: + - cluster_stats + period: null + scope: node + - condition: ${kubernetes.hints.elasticsearch.enrich.enabled} == true or ${kubernetes.hints.elasticsearch.enabled} == true + data_stream: + dataset: elasticsearch.stack_monitoring.enrich + type: metrics + hosts: + - ${kubernetes.hints.elasticsearch.enrich.host|'http://localhost:9200'} + metricsets: + - enrich + period: null + scope: node + - condition: ${kubernetes.hints.elasticsearch.index.enabled} == true or ${kubernetes.hints.elasticsearch.enabled} == true + data_stream: + dataset: elasticsearch.stack_monitoring.index + type: metrics + hosts: + - ${kubernetes.hints.elasticsearch.index.host|'http://localhost:9200'} + metricsets: + - index + period: null + scope: node + - condition: ${kubernetes.hints.elasticsearch.index_recovery.enabled} == true or ${kubernetes.hints.elasticsearch.enabled} == true + data_stream: + dataset: elasticsearch.stack_monitoring.index_recovery + type: metrics + hosts: + - ${kubernetes.hints.elasticsearch.index_recovery.host|'http://localhost:9200'} + metricsets: + - index_recovery + period: null + scope: node + - condition: ${kubernetes.hints.elasticsearch.index_summary.enabled} == true or ${kubernetes.hints.elasticsearch.enabled} == true + data_stream: + dataset: elasticsearch.stack_monitoring.index_summary + type: metrics + hosts: + - ${kubernetes.hints.elasticsearch.index_summary.host|'http://localhost:9200'} + metricsets: + - index_summary + period: null + scope: node + - condition: 
${kubernetes.hints.elasticsearch.ml_job.enabled} == true or ${kubernetes.hints.elasticsearch.enabled} == true + data_stream: + dataset: elasticsearch.stack_monitoring.ml_job + type: metrics + hosts: + - ${kubernetes.hints.elasticsearch.ml_job.host|'http://localhost:9200'} + metricsets: + - ml_job + period: null + scope: node + - condition: ${kubernetes.hints.elasticsearch.node.enabled} == true or ${kubernetes.hints.elasticsearch.enabled} == true + data_stream: + dataset: elasticsearch.stack_monitoring.node + type: metrics + hosts: + - ${kubernetes.hints.elasticsearch.node.host|'http://localhost:9200'} + metricsets: + - node + period: null + scope: node + - condition: ${kubernetes.hints.elasticsearch.node_stats.enabled} == true or ${kubernetes.hints.elasticsearch.enabled} == true + data_stream: + dataset: elasticsearch.stack_monitoring.node_stats + type: metrics + hosts: + - ${kubernetes.hints.elasticsearch.node_stats.host|'http://localhost:9200'} + metricsets: + - node_stats + period: null + scope: node + - condition: ${kubernetes.hints.elasticsearch.pending_tasks.enabled} == true or ${kubernetes.hints.elasticsearch.enabled} == true + data_stream: + dataset: elasticsearch.stack_monitoring.pending_tasks + type: metrics + hosts: + - ${kubernetes.hints.elasticsearch.pending_tasks.host|'http://localhost:9200'} + metricsets: + - pending_tasks + period: null + scope: node + - condition: ${kubernetes.hints.elasticsearch.shard.enabled} == true or ${kubernetes.hints.elasticsearch.enabled} == true + data_stream: + dataset: elasticsearch.stack_monitoring.shard + type: metrics + hosts: + - ${kubernetes.hints.elasticsearch.shard.host|'http://localhost:9200'} + metricsets: + - shard + period: null + scope: node + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/endpoint.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/endpoint.yml new file mode 100644 index 00000000000..178a6098f99 --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/endpoint.yml @@ -0,0 +1,22 @@ +inputs: + - name: filestream-endpoint + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.endpoint.container_logs.enabled} == true + data_stream: + dataset: kubernetes.container_logs + type: logs + exclude_files: [] + exclude_lines: [] + parsers: + - container: + format: auto + stream: all + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + prospector: + scanner: + symlinks: true + tags: [] + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/fireeye.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/fireeye.yml new file mode 100644 index 00000000000..8e226e0d925 --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/fireeye.yml @@ -0,0 +1,59 @@ +inputs: + - name: tcp-fireeye + type: tcp + use_output: default + streams: + - condition: ${kubernetes.hints.fireeye.nx.enabled} == true or ${kubernetes.hints.fireeye.enabled} == true + data_stream: + dataset: fireeye.nx + type: logs + fields_under_root: true + host: localhost:9523 + processors: + - add_locale: null + tags: + - fireeye-nx + - forwarded + tcp: null + data_stream.namespace: default + - name: filestream-fireeye + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.fireeye.nx.enabled} == true or ${kubernetes.hints.fireeye.enabled} == true + data_stream: + dataset: fireeye.nx + type: logs + exclude_files: + - .gz$ + parsers: + - container: + 
format: auto + stream: ${kubernetes.hints.fireeye.nx.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + processors: + - add_locale: null + prospector: + scanner: + symlinks: true + tags: + - fireeye-nx + data_stream.namespace: default + - name: udp-fireeye + type: udp + use_output: default + streams: + - condition: ${kubernetes.hints.fireeye.nx.enabled} == true or ${kubernetes.hints.fireeye.enabled} == true + data_stream: + dataset: fireeye.nx + type: logs + fields_under_root: true + host: localhost:9523 + processors: + - add_locale: null + tags: + - fireeye-nx + - forwarded + udp: null + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/haproxy.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/haproxy.yml new file mode 100644 index 00000000000..0f1debdee34 --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/haproxy.yml @@ -0,0 +1,68 @@ +inputs: + - name: haproxy/metrics-haproxy + type: haproxy/metrics + use_output: default + streams: + - condition: ${kubernetes.hints.haproxy.info.enabled} == true or ${kubernetes.hints.haproxy.enabled} == true + data_stream: + dataset: haproxy.info + type: metrics + hosts: + - ${kubernetes.hints.haproxy.info.host|'tcp://127.0.0.1:14567'} + metricsets: + - info + password: ${kubernetes.hints.haproxy.info.password|'admin'} + period: ${kubernetes.hints.haproxy.info.period|'10s'} + username: ${kubernetes.hints.haproxy.info.username|'admin'} + - condition: ${kubernetes.hints.haproxy.stat.enabled} == true or ${kubernetes.hints.haproxy.enabled} == true + data_stream: + dataset: haproxy.stat + type: metrics + hosts: + - ${kubernetes.hints.haproxy.stat.host|'tcp://127.0.0.1:14567'} + metricsets: + - stat + password: ${kubernetes.hints.haproxy.stat.password|'admin'} + period: ${kubernetes.hints.haproxy.stat.period|'10s'} + username: ${kubernetes.hints.haproxy.stat.username|'admin'} + data_stream.namespace: default + - name: filestream-haproxy + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.haproxy.log.enabled} == true or ${kubernetes.hints.haproxy.enabled} == true + data_stream: + dataset: haproxy.log + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.haproxy.log.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + processors: + - add_locale: null + prospector: + scanner: + symlinks: true + tags: + - haproxy-log + data_stream.namespace: default + - name: syslog-haproxy + type: syslog + use_output: default + streams: + - condition: ${kubernetes.hints.haproxy.log.enabled} == true or ${kubernetes.hints.haproxy.enabled} == true + data_stream: + dataset: haproxy.log + type: logs + processors: + - add_locale: null + protocol.udp: + host: localhost:9001 + tags: + - forwarded + - haproxy-log + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/hashicorp_vault.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/hashicorp_vault.yml new file mode 100644 index 00000000000..19892110b74 --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/hashicorp_vault.yml @@ -0,0 +1,73 @@ +inputs: + - name: filestream-hashicorp_vault + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.hashicorp_vault.audit.enabled} == true or ${kubernetes.hints.hashicorp_vault.enabled} == true + data_stream: + dataset: hashicorp_vault.audit + type: logs + 
exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.hashicorp_vault.audit.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + prospector: + scanner: + symlinks: true + tags: + - hashicorp-vault-audit + - condition: ${kubernetes.hints.hashicorp_vault.log.enabled} == true or ${kubernetes.hints.hashicorp_vault.enabled} == true + data_stream: + dataset: hashicorp_vault.log + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.hashicorp_vault.log.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + prospector: + scanner: + symlinks: true + tags: + - hashicorp-vault-log + data_stream.namespace: default + - name: tcp-hashicorp_vault + type: tcp + use_output: default + streams: + - condition: ${kubernetes.hints.hashicorp_vault.audit.enabled} == true and ${kubernetes.hints.hashicorp_vault.enabled} == true + data_stream: + dataset: hashicorp_vault.audit + type: logs + host: localhost:9007 + max_message_size: 1 MiB + tags: + - hashicorp-vault-audit + - forwarded + data_stream.namespace: default + - name: prometheus/metrics-hashicorp_vault + type: prometheus/metrics + use_output: default + streams: + - condition: ${kubernetes.hints.hashicorp_vault.metrics.enabled} == true or ${kubernetes.hints.hashicorp_vault.enabled} == true + data_stream: + dataset: hashicorp_vault.metrics + type: metrics + hosts: + - ${kubernetes.hints.hashicorp_vault.metrics.host|'http://localhost:8200'} + metrics_path: /v1/sys/metrics + metricsets: + - collector + period: ${kubernetes.hints.hashicorp_vault.metrics.period|'30s'} + query: + format: prometheus + rate_counters: true + use_types: true + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/hid_bravura_monitor.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/hid_bravura_monitor.yml new file mode 100644 index 00000000000..28d8f782d69 --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/hid_bravura_monitor.yml @@ -0,0 +1,42 @@ +inputs: + - name: filestream-hid_bravura_monitor + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.hid_bravura_monitor.log.enabled} == true or ${kubernetes.hints.hid_bravura_monitor.enabled} == true + data_stream: + dataset: hid_bravura_monitor.log + type: logs + line_terminator: carriage_return_line_feed + parsers: + - multiline: + match: after + negate: true + pattern: ^[[:cntrl:]] + type: pattern + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + processors: + - add_fields: + fields: + event.timezone: UTC + hid_bravura_monitor.environment: PRODUCTION + hid_bravura_monitor.instancename: default + hid_bravura_monitor.instancetype: Privilege-Identity-Password + hid_bravura_monitor.node: 0.0.0.0 + target: "" + prospector.scanner.exclude_files: + - .gz$ + tags: null + data_stream.namespace: default + - name: winlog-hid_bravura_monitor + type: winlog + use_output: default + streams: + - condition: ${kubernetes.hints.hid_bravura_monitor.winlog.enabled} == true or ${kubernetes.hints.hid_bravura_monitor.enabled} == true + data_stream: + dataset: hid_bravura_monitor.winlog + type: logs + name: Hitachi-Hitachi ID Systems-Hitachi ID Suite/Operational + tags: null + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/iis.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/iis.yml new file mode 100644 
index 00000000000..44162f4ac6b --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/iis.yml @@ -0,0 +1,71 @@ +inputs: + - name: filestream-iis + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.iis.access.enabled} == true or ${kubernetes.hints.iis.enabled} == true + data_stream: + dataset: iis.access + type: logs + exclude_files: + - .gz$ + exclude_lines: + - ^# + parsers: + - container: + format: auto + stream: ${kubernetes.hints.iis.access.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + prospector: + scanner: + symlinks: true + tags: + - iis-access + - condition: ${kubernetes.hints.iis.error.enabled} == true or ${kubernetes.hints.iis.enabled} == true + data_stream: + dataset: iis.error + type: logs + exclude_files: + - .gz$ + exclude_lines: + - ^# + parsers: + - container: + format: auto + stream: ${kubernetes.hints.iis.error.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + prospector: + scanner: + symlinks: true + tags: + - iis-error + data_stream.namespace: default + - name: iis/metrics-iis + type: iis/metrics + use_output: default + streams: + - application_pool.name: null + condition: ${kubernetes.hints.iis.application_pool.enabled} == true or ${kubernetes.hints.iis.enabled} == true + data_stream: + dataset: iis.application_pool + type: metrics + metricsets: + - application_pool + period: ${kubernetes.hints.iis.application_pool.period|'10s'} + - condition: ${kubernetes.hints.iis.webserver.enabled} == true or ${kubernetes.hints.iis.enabled} == true + data_stream: + dataset: iis.webserver + type: metrics + metricsets: + - webserver + period: ${kubernetes.hints.iis.webserver.period|'10s'} + - condition: ${kubernetes.hints.iis.website.enabled} == true or ${kubernetes.hints.iis.enabled} == true + data_stream: + dataset: iis.website + type: metrics + metricsets: + - website + period: ${kubernetes.hints.iis.website.period|'10s'} + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/infoblox_nios.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/infoblox_nios.yml new file mode 100644 index 00000000000..ad76a72b86b --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/infoblox_nios.yml @@ -0,0 +1,51 @@ +inputs: + - name: filestream-infoblox_nios + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.infoblox_nios.log.enabled} == true or ${kubernetes.hints.infoblox_nios.enabled} == true + data_stream: + dataset: infoblox_nios.log + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.infoblox_nios.log.stream|'all'} + paths: null + processors: + - add_locale: null + prospector: + scanner: + symlinks: true + tags: + - forwarded + - infoblox_nios-log + data_stream.namespace: default + - name: tcp-infoblox_nios + type: tcp + use_output: default + streams: + - condition: ${kubernetes.hints.infoblox_nios.log.enabled} == true or ${kubernetes.hints.infoblox_nios.enabled} == true + data_stream: + dataset: infoblox_nios.log + type: logs + host: localhost:9027 + tags: + - forwarded + - infoblox_nios-log + data_stream.namespace: default + - name: udp-infoblox_nios + type: udp + use_output: default + streams: + - condition: ${kubernetes.hints.infoblox_nios.log.enabled} == true or ${kubernetes.hints.infoblox_nios.enabled} == true + data_stream: + dataset: infoblox_nios.log + type: logs + host: localhost:9028 + 
tags: + - forwarded + - infoblox_nios-log + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/iptables.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/iptables.yml new file mode 100644 index 00000000000..02d1d8330d3 --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/iptables.yml @@ -0,0 +1,54 @@ +inputs: + - name: udp-iptables + type: udp + use_output: default + streams: + - condition: ${kubernetes.hints.iptables.log.enabled} == true or ${kubernetes.hints.iptables.enabled} == true + data_stream: + dataset: iptables.log + type: logs + host: localhost:9001 + processors: + - add_locale: null + tags: + - iptables-log + - forwarded + data_stream.namespace: default + - name: filestream-iptables + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.iptables.log.enabled} == true and ${kubernetes.hints.iptables.enabled} == true + data_stream: + dataset: iptables.log + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.iptables.log.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + processors: + - add_locale: null + prospector: + scanner: + symlinks: true + tags: + - iptables-log + - forwarded + data_stream.namespace: default + - name: journald-iptables + type: journald + use_output: default + streams: + - condition: ${kubernetes.hints.iptables.log.enabled} == true or ${kubernetes.hints.iptables.enabled} == true + data_stream: + dataset: iptables.log + type: logs + include_matches: + - _TRANSPORT=kernel + tags: + - iptables-log + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/kafka.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/kafka.yml new file mode 100644 index 00000000000..c35cff8619d --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/kafka.yml @@ -0,0 +1,61 @@ +inputs: + - name: kafka/metrics-kafka + type: kafka/metrics + use_output: default + streams: + - condition: ${kubernetes.hints.kafka.broker.enabled} == true or ${kubernetes.hints.kafka.enabled} == true + data_stream: + dataset: kafka.broker + type: metrics + hosts: + - localhost:8778 + metricsets: + - broker + period: ${kubernetes.hints.kafka.broker.period|'10s'} + - condition: ${kubernetes.hints.kafka.consumergroup.enabled} == true or ${kubernetes.hints.kafka.enabled} == true + data_stream: + dataset: kafka.consumergroup + type: metrics + hosts: + - ${kubernetes.hints.kafka.consumergroup.host|'localhost:9092'} + metricsets: + - consumergroup + period: ${kubernetes.hints.kafka.consumergroup.period|'10s'} + - condition: ${kubernetes.hints.kafka.partition.enabled} == true or ${kubernetes.hints.kafka.enabled} == true + data_stream: + dataset: kafka.partition + type: metrics + hosts: + - ${kubernetes.hints.kafka.partition.host|'localhost:9092'} + metricsets: + - partition + period: ${kubernetes.hints.kafka.partition.period|'10s'} + data_stream.namespace: default + - name: filestream-kafka + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.kafka.log.enabled} == true or ${kubernetes.hints.kafka.enabled} == true + data_stream: + dataset: kafka.log + type: logs + exclude_files: + - .gz$ + multiline: + match: after + negate: true + pattern: ^\[ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.kafka.log.stream|'all'} + paths: + - /opt/kafka*/var/log/containers/*${kubernetes.hints.container_id}.log 
+ processors: + - add_locale: null + prospector: + scanner: + symlinks: true + tags: + - kafka-log + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/keycloak.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/keycloak.yml new file mode 100644 index 00000000000..794d014d41c --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/keycloak.yml @@ -0,0 +1,23 @@ +inputs: + - name: filestream-keycloak + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.keycloak.log.enabled} == true or ${kubernetes.hints.keycloak.enabled} == true + data_stream: + dataset: keycloak.log + type: logs + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + processors: + - add_locale: null + - add_fields: + fields: + only_user_events: false + tz_offset: local + target: _tmp + prospector.scanner.exclude_files: + - \.gz$ + tags: + - keycloak-log + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/kibana.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/kibana.yml new file mode 100644 index 00000000000..1c27b4830ab --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/kibana.yml @@ -0,0 +1,112 @@ +inputs: + - name: filestream-kibana + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.kibana.audit.enabled} == true or ${kubernetes.hints.kibana.enabled} == true + data_stream: + dataset: kibana.audit + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.kibana.audit.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + processors: + - add_locale: null + - add_fields: + fields: + ecs.version: 1.10.0 + target: "" + - decode_json_fields: + fields: + - message + target: kibana._audit_temp + prospector: + scanner: + symlinks: true + - condition: ${kubernetes.hints.kibana.log.enabled} == true or ${kubernetes.hints.kibana.enabled} == true + data_stream: + dataset: kibana.log + type: logs + exclude_files: + - .gz$ + json.add_error_key: true + json.keys_under_root: false + parsers: + - container: + format: auto + stream: ${kubernetes.hints.kibana.log.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + processors: + - add_fields: + fields: + ecs.version: 1.10.0 + target: "" + prospector: + scanner: + symlinks: true + data_stream.namespace: default + - name: kibana/metrics-kibana + type: kibana/metrics + use_output: default + streams: + - condition: ${kubernetes.hints.kibana.cluster_actions.enabled} == true or ${kubernetes.hints.kibana.enabled} == true + data_stream: + dataset: kibana.stack_monitoring.cluster_actions + type: metrics + hosts: + - ${kubernetes.hints.kibana.cluster_actions.host|'http://localhost:5601'} + metricsets: + - cluster_actions + period: null + - condition: ${kubernetes.hints.kibana.cluster_rules.enabled} == true or ${kubernetes.hints.kibana.enabled} == true + data_stream: + dataset: kibana.stack_monitoring.cluster_rules + type: metrics + hosts: + - ${kubernetes.hints.kibana.cluster_rules.host|'http://localhost:5601'} + metricsets: + - cluster_rules + period: null + - condition: ${kubernetes.hints.kibana.node_actions.enabled} == true or ${kubernetes.hints.kibana.enabled} == true + data_stream: + dataset: kibana.stack_monitoring.node_actions + type: metrics + hosts: + - ${kubernetes.hints.kibana.node_actions.host|'http://localhost:5601'} + metricsets: + 
- node_actions + period: null + - condition: ${kubernetes.hints.kibana.node_rules.enabled} == true or ${kubernetes.hints.kibana.enabled} == true + data_stream: + dataset: kibana.stack_monitoring.node_rules + type: metrics + hosts: + - ${kubernetes.hints.kibana.node_rules.host|'http://localhost:5601'} + metricsets: + - node_rules + period: null + - condition: ${kubernetes.hints.kibana.stats.enabled} == true or ${kubernetes.hints.kibana.enabled} == true + data_stream: + dataset: kibana.stack_monitoring.stats + type: metrics + hosts: + - ${kubernetes.hints.kibana.stats.host|'http://localhost:5601'} + metricsets: + - stats + period: null + - condition: ${kubernetes.hints.kibana.status.enabled} == true or ${kubernetes.hints.kibana.enabled} == true + data_stream: + dataset: kibana.stack_monitoring.status + type: metrics + hosts: + - ${kubernetes.hints.kibana.status.host|'http://localhost:5601'} + metricsets: + - status + period: null + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/log.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/log.yml new file mode 100644 index 00000000000..b4627a13814 --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/log.yml @@ -0,0 +1,18 @@ +inputs: + - name: filestream-log + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.log.log.enabled} == true or ${kubernetes.hints.log.enabled} == true + data_stream: + dataset: log.log + type: logs + parsers: + - container: + format: auto + stream: ${kubernetes.hints.log.log.stream|'all'} + paths: null + prospector: + scanner: + symlinks: true + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/logstash.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/logstash.yml new file mode 100644 index 00000000000..6ba62de3274 --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/logstash.yml @@ -0,0 +1,75 @@ +inputs: + - name: filestream-logstash + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.logstash.log.enabled} == true or ${kubernetes.hints.logstash.enabled} == true + data_stream: + dataset: logstash.log + type: logs + exclude_files: + - .gz$ + multiline: + match: after + negate: true + pattern: ^((\[[0-9]{4}-[0-9]{2}-[0-9]{2}[^\]]+\])|({.+})) + parsers: + - container: + format: auto + stream: ${kubernetes.hints.logstash.log.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + processors: + - add_locale.when.not.regexp.message: ^{ + - add_fields: + fields: + ecs.version: 1.10.0 + target: "" + prospector: + scanner: + symlinks: true + - condition: ${kubernetes.hints.logstash.slowlog.enabled} == true or ${kubernetes.hints.logstash.enabled} == true + data_stream: + dataset: logstash.slowlog + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.logstash.slowlog.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + processors: + - add_locale.when.not.regexp.message: ^{ + - add_fields: + fields: + ecs.version: 1.10.0 + target: "" + prospector: + scanner: + symlinks: true + data_stream.namespace: default + - name: logstash/metrics-logstash + type: logstash/metrics + use_output: default + streams: + - condition: ${kubernetes.hints.logstash.node.enabled} == true or ${kubernetes.hints.logstash.enabled} == true + data_stream: + dataset: logstash.stack_monitoring.node + type: metrics + 
hosts: + - ${kubernetes.hints.logstash.node.host|'http://localhost:9600'} + metricsets: + - node + period: ${kubernetes.hints.logstash.node.period|'10s'} + - condition: ${kubernetes.hints.logstash.node_stats.enabled} == true or ${kubernetes.hints.logstash.enabled} == true + data_stream: + dataset: logstash.stack_monitoring.node_stats + type: metrics + hosts: + - ${kubernetes.hints.logstash.node_stats.host|'http://localhost:9600'} + metricsets: + - node_stats + period: ${kubernetes.hints.logstash.node_stats.period|'10s'} + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/mattermost.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/mattermost.yml new file mode 100644 index 00000000000..de5c8932af1 --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/mattermost.yml @@ -0,0 +1,22 @@ +inputs: + - name: filestream-mattermost + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.mattermost.audit.enabled} == true or ${kubernetes.hints.mattermost.enabled} == true + data_stream: + dataset: mattermost.audit + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.mattermost.audit.stream|'all'} + paths: null + prospector: + scanner: + symlinks: true + tags: + - mattermost-audit + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/microsoft_sqlserver.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/microsoft_sqlserver.yml new file mode 100644 index 00000000000..5ac70293051 --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/microsoft_sqlserver.yml @@ -0,0 +1,127 @@ +inputs: + - name: winlog-microsoft_sqlserver + type: winlog + use_output: default + streams: + - condition: ${kubernetes.hints.microsoft_sqlserver.audit.enabled} == true or ${kubernetes.hints.microsoft_sqlserver.enabled} == true + data_stream: + dataset: microsoft_sqlserver.audit + type: logs + event_id: 33205 + ignore_older: 72h + name: Security + data_stream.namespace: default + - name: filestream-microsoft_sqlserver + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.microsoft_sqlserver.log.enabled} == true and ${kubernetes.hints.microsoft_sqlserver.enabled} == true + data_stream: + dataset: microsoft_sqlserver.log + type: logs + exclude_files: + - .gz$ + multiline: + match: after + negate: true + pattern: ^\d{4}-\d{2}-\d{2} + parsers: + - container: + format: auto + stream: ${kubernetes.hints.microsoft_sqlserver.log.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + prospector: + scanner: + symlinks: true + tags: + - mssql-logs + data_stream.namespace: default + - name: sql/metrics-microsoft_sqlserver + type: sql/metrics + use_output: default + streams: + - condition: ${kubernetes.hints.microsoft_sqlserver.performance.enabled} == true and ${kubernetes.hints.microsoft_sqlserver.enabled} == true + data_stream: + dataset: microsoft_sqlserver.performance + type: metrics + driver: mssql + dynamic_counter_name: Memory Grants Pending + hosts: + - sqlserver://${kubernetes.hints.microsoft_sqlserver.performance.username|'domain\username'}:${kubernetes.hints.microsoft_sqlserver.performance.password|'verysecurepassword'}@${kubernetes.hints.microsoft_sqlserver.performance.host|'localhost'}:1433 + metricsets: + - query + period: ${kubernetes.hints.microsoft_sqlserver.performance.period|'60s'} + raw_data.enabled: true + sql_queries: + - 
query: SELECT cntr_value As 'user_connections' FROM sys.dm_os_performance_counters WHERE counter_name= 'User Connections' + response_format: table + - query: SELECT cntr_value As 'active_temp_tables' FROM sys.dm_os_performance_counters WHERE counter_name = 'Active Temp Tables' AND object_name like '%General Statistics%' + response_format: table + - query: SELECT cntr_value As 'buffer_cache_hit_ratio' FROM sys.dm_os_performance_counters WHERE counter_name = 'Buffer cache hit ratio' AND object_name like '%Buffer Manager%' + response_format: table + - query: SELECT cntr_value As 'page_splits_per_sec' FROM sys.dm_os_performance_counters WHERE counter_name = 'Page splits/sec' + response_format: table + - query: SELECT cntr_value As 'lock_waits_per_sec' FROM sys.dm_os_performance_counters WHERE counter_name = 'Lock Waits/sec' AND instance_name = '_Total' + response_format: table + - query: SELECT cntr_value As 'compilations_per_sec' FROM sys.dm_os_performance_counters WHERE counter_name = 'SQL Compilations/sec' + response_format: table + - query: SELECT cntr_value As 'batch_requests_per_sec' FROM sys.dm_os_performance_counters WHERE counter_name = 'Batch Requests/sec' + response_format: table + - query: SELECT cntr_value As 'buffer_checkpoint_pages_per_sec' FROM sys.dm_os_performance_counters WHERE counter_name = 'Checkpoint pages/sec' AND object_name like '%Buffer Manager%' + response_format: table + - query: SELECT cntr_value As 'buffer_database_pages' FROM sys.dm_os_performance_counters WHERE counter_name = 'Database pages' AND object_name like '%Buffer Manager%' + response_format: table + - query: SELECT cntr_value As 'buffer_page_life_expectancy' FROM sys.dm_os_performance_counters WHERE counter_name = 'Page life expectancy' AND object_name like '%Buffer Manager%' + response_format: table + - query: SELECT cntr_value As 'buffer_target_pages' FROM sys.dm_os_performance_counters WHERE counter_name = 'Target pages' AND object_name like '%Buffer Manager%' + response_format: table + - query: SELECT cntr_value As 'connection_reset_per_sec' FROM sys.dm_os_performance_counters WHERE counter_name = 'Connection Reset/sec' AND object_name like '%Buffer Manager%' + response_format: table + - query: SELECT cntr_value As 'logins_per_sec' FROM sys.dm_os_performance_counters WHERE counter_name = 'Logins/sec' AND object_name like '%General Statistics%' + response_format: table + - query: SELECT cntr_value As 'logouts_per_sec' FROM sys.dm_os_performance_counters WHERE counter_name = 'Logouts/sec' AND object_name like '%General Statistics%' + response_format: table + - query: SELECT cntr_value As 'transactions' FROM sys.dm_os_performance_counters WHERE counter_name = 'Transactions' AND object_name like '%General Statistics%' + response_format: table + - query: SELECT cntr_value As 're_compilations_per_sec' FROM sys.dm_os_performance_counters WHERE counter_name = 'SQL Re-Compilations/sec' + response_format: table + - query: SELECT counter_name As 'dynamic_counter.name', cntr_value As 'dynamic_counter.value' FROM sys.dm_os_performance_counters WHERE counter_name= 'Memory Grants Pending' + response_format: table + - condition: ${kubernetes.hints.microsoft_sqlserver.transaction_log.enabled} == true and ${kubernetes.hints.microsoft_sqlserver.enabled} == true + data_stream: + dataset: microsoft_sqlserver.transaction_log + type: metrics + driver: mssql + hosts: + - 
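Each performance query above reads a single counter row from sys.dm_os_performance_counters and aliases cntr_value to the metric name. A hedged sketch of extending the list with one more counter; the counter and instance names must exist on the monitored server:

- query: SELECT cntr_value As 'deadlocks_per_sec' FROM sys.dm_os_performance_counters WHERE counter_name = 'Number of Deadlocks/sec' AND instance_name = '_Total'
  response_format: table   # one row, one column named after the alias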
sqlserver://${kubernetes.hints.microsoft_sqlserver.transaction_log.username|'domain\username'}:${kubernetes.hints.microsoft_sqlserver.transaction_log.password|'verysecurepassword'}@${kubernetes.hints.microsoft_sqlserver.transaction_log.host|'localhost'}:1433 + metricsets: + - query + period: ${kubernetes.hints.microsoft_sqlserver.transaction_log.period|'60s'} + raw_data.enabled: true + sql_queries: + - query: SELECT name As 'database_name', database_id FROM sys.databases WHERE database_id=1; + response_format: table + - query: SELECT 'master' As database_name, database_id,total_log_size_mb,active_log_size_mb,log_backup_time,log_since_last_log_backup_mb,log_since_last_checkpoint_mb,log_recovery_size_mb FROM sys.dm_db_log_stats(1) master + response_format: table + - query: SELECT 'master' As 'database_name', total_log_size_in_bytes As total_log_size_bytes, used_log_space_in_bytes As used_log_space_bytes, used_log_space_in_percent As used_log_space_pct, log_space_in_bytes_since_last_backup FROM sys.dm_db_log_space_usage master + response_format: table + - query: SELECT name As 'database_name', database_id FROM sys.databases WHERE database_id=2; + response_format: table + - query: SELECT 'tempdb' As 'database_name', database_id,total_log_size_mb,active_log_size_mb As active_log_size,log_backup_time,log_since_last_log_backup_mb, log_since_last_checkpoint_mb,log_recovery_size_mb FROM sys.dm_db_log_stats(2) tempdb + response_format: table + - query: SELECT 'tempdb' As 'database_name', total_log_size_in_bytes As total_log_size_bytes, used_log_space_in_bytes As used_log_space_bytes, used_log_space_in_percent As used_log_space_pct, log_space_in_bytes_since_last_backup FROM sys.dm_db_log_space_usage tempdb + response_format: table + - query: SELECT name As 'database_name', database_id FROM sys.databases WHERE database_id=3; + response_format: table + - query: SELECT 'model' As 'database_name', database_id,total_log_size_mb,active_log_size_mb As active_log_size,log_backup_time,log_since_last_log_backup_mb, log_since_last_checkpoint_mb,log_recovery_size_mb FROM sys.dm_db_log_stats(3) model + response_format: table + - query: SELECT 'model' As 'database_name', total_log_size_in_bytes As total_log_size_bytes, used_log_space_in_bytes As used_log_space_bytes, used_log_space_in_percent As used_log_space_pct, log_space_in_bytes_since_last_backup FROM sys.dm_db_log_space_usage model + response_format: table + - query: SELECT name As 'database_name', database_id FROM sys.databases WHERE database_id=4; + response_format: table + - query: SELECT 'msdb' As 'database_name', database_id,total_log_size_mb,active_log_size_mb As active_log_size,log_backup_time,log_since_last_log_backup_mb, log_since_last_checkpoint_mb,log_recovery_size_mb FROM sys.dm_db_log_stats(4) msdb + response_format: table + - query: SELECT 'msdb' As 'database_name', total_log_size_in_bytes As total_log_size_bytes, used_log_space_in_bytes As used_log_space_bytes, used_log_space_in_percent As used_log_space_pct, log_space_in_bytes_since_last_backup FROM sys.dm_db_log_space_usage msdb + response_format: table + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/mimecast.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/mimecast.yml new file mode 100644 index 00000000000..23139e47852 --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/mimecast.yml @@ -0,0 +1,381 @@ +inputs: + - name: httpjson-mimecast + type: httpjson + use_output: default + streams: + - condition: 
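The transaction_log queries above walk the fixed system database_ids (1 master, 2 tempdb, 3 model, 4 msdb) through sys.dm_db_log_stats and sys.dm_db_log_space_usage. A sketch of the same pair for a user database, assuming it has database_id 5 and the name appdb:

- query: SELECT name As 'database_name', database_id FROM sys.databases WHERE database_id=5;
  response_format: table
- query: SELECT 'appdb' As 'database_name', database_id, total_log_size_mb, active_log_size_mb As active_log_size, log_backup_time, log_since_last_log_backup_mb, log_since_last_checkpoint_mb, log_recovery_size_mb FROM sys.dm_db_log_stats(5) appdb
  response_format: table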
${kubernetes.hints.mimecast.audit_events.enabled} == true or ${kubernetes.hints.mimecast.enabled} == true + config_version: "2" + cursor: + next_date: + value: '[[.first_event.eventTime]]' + data_stream: + dataset: mimecast.audit_events + type: logs + interval: 5m + request.method: POST + request.transforms: + - set: + target: body.meta.pagination.pageSize + value: 500 + - set: + default: '[{"endDateTime": "[[formatDate (now) "2006-01-02T15:04:05+0700"]]", "startDateTime":"[[formatDate (now (parseDuration "-5m")) "2006-01-02T15:04:05+0700"]]"}]' + target: body.data + value: '[{"endDateTime": "[[formatDate (now) "2006-01-02T15:04:05+0700"]]", "startDateTime":"[[.cursor.next_date]]"}]' + value_type: json + - set: + target: header.x-mc-app-id + value: null + - set: + target: header.x-mc-date + value: '[[formatDate (now) "RFC1123"]]' + - set: + target: header.x-mc-req-id + value: '[[uuid]]' + - set: + fail_on_template_error: true + target: header.Authorization + value: MC :[[hmacBase64 "sha1" (base64Decode "") (sprintf "%s:%s:/api/audit/get-audit-events:" (.header.Get "x-mc-date") (.header.Get "x-mc-req-id"))]] + request.url: https://eu-api.mimecast.com/api/audit/get-audit-events + response.decode_as: application/json + response.pagination: + - set: + fail_on_template_error: true + target: body.meta.pagination.pageToken + value: '[[.last_response.body.meta.pagination.next]]' + response.split: + target: body.data + tags: + - forwarded + - mimecast-audit-events + - condition: ${kubernetes.hints.mimecast.dlp_logs.enabled} == true or ${kubernetes.hints.mimecast.enabled} == true + config_version: "2" + cursor: null + data_stream: + dataset: mimecast.dlp_logs + type: logs + interval: 5m + next_date: + value: '[[.first_event.eventTime]]' + request.method: POST + request.transforms: + - set: + default: '[{"to": "[[formatDate (now) "2006-01-02T15:04:05+0700"]]", "from":"[[formatDate (now (parseDuration "-5m")) "2006-01-02T15:04:05+0700"]]"}]' + target: body.data + value: '[{"to": "[[formatDate (now) "2006-01-02T15:04:05+0700"]]", "from":"[[.cursor.eventTime]]"}]' + value_type: json + - set: + target: header.x-mc-app-id + value: null + - set: + target: header.x-mc-date + value: '[[formatDate (now) "RFC1123"]]' + - set: + target: header.x-mc-req-id + value: '[[uuid]]' + - set: + fail_on_template_error: true + target: header.Authorization + value: MC :[[hmacBase64 "sha1" (base64Decode "") (sprintf "%s:%s:/api/dlp/get-logs:" (.header.Get "x-mc-date") (.header.Get "x-mc-req-id"))]] + request.url: https://eu-api.mimecast.com/api/dlp/get-logs + response.decode_as: application/json + response.pagination: + - set: + fail_on_template_error: true + target: body.meta.pagination.pageToken + value: '[[.last_response.body.meta.pagination.next]]' + response.split: + split: + target: body.dlpLogs + target: body.data + tags: + - forwarded + - mimecast-dlp-logs + - condition: ${kubernetes.hints.mimecast.siem_logs.enabled} == true or ${kubernetes.hints.mimecast.enabled} == true + config_version: "2" + cursor: + next_token: + value: '[[.last_response.header.Get "mc-siem-token"]]' + data_stream: + dataset: mimecast.siem_logs + type: logs + interval: 5m + request.method: POST + request.transforms: + - set: + default: '[{"type":"MTA","fileFormat":"json", "compress":true}]' + target: body.data + value: '[{"type":"MTA","fileFormat":"json", "compress":true, "token": "[[.cursor.next_token]]"}]' + value_type: json + - set: + target: header.x-mc-app-id + value: null + - set: + target: header.x-mc-date + value: '[[formatDate (now) 
"RFC1123"]]' + - set: + target: header.x-mc-req-id + value: '[[uuid]]' + - set: + fail_on_template_error: true + target: header.Authorization + value: MC :[[hmacBase64 "sha1" (base64Decode "") (sprintf "%s:%s:/api/audit/get-siem-logs:" (.header.Get "x-mc-date") (.header.Get "x-mc-req-id"))]] + - set: + target: header.Accept + value: '*/*' + request.url: https://eu-api.mimecast.com/api/audit/get-siem-logs + response.decode_as: application/zip + response.pagination: + - set: + target: body.data + value: '[{"type":"MTA","fileFormat":"json", "compress":true, "token": "[[.last_response.header.Get "mc-siem-token"]]"}]' + value_type: json + response.split: + target: body.data + transforms: + - set: + target: body.Content-Disposition + value: '[[.last_response.header.Get "Content-Disposition"]]' + tags: + - forwarded + - mimecast-siem-logs + - condition: ${kubernetes.hints.mimecast.threat_intel_malware_customer.enabled} == true or ${kubernetes.hints.mimecast.enabled} == true + config_version: "2" + cursor: + next_date: + value: '[[.first_event.created]]' + data_stream: + dataset: mimecast.threat_intel_malware_customer + type: logs + interval: 5m + request.method: POST + request.transforms: + - set: + default: '[{"feedType": "malware_customer","fileType": "stix","compress": false,"end": "[[formatDate (now) "2006-01-02T15:04:05+0700"]]", "start":"[[formatDate (now (parseDuration "-5m")) "2006-01-02T15:04:05+0700"]]"}]' + target: body.data + value: '[{"feedType": "malware_customer","fileType": "stix","compress": false,"end": "[[formatDate (now) "2006-01-02T15:04:05+0700"]]", "start":"[[formatDate (.cursor.next_date) "2006-01-02T15:04:05+0700"]]"}]' + value_type: json + - set: + target: header.x-mc-app-id + value: null + - set: + target: header.x-mc-date + value: '[[formatDate (now) "RFC1123"]]' + - set: + target: header.x-mc-req-id + value: '[[uuid]]' + - set: + fail_on_template_error: true + target: header.Authorization + value: MC :[[hmacBase64 "sha1" (base64Decode "") (sprintf "%s:%s:/api/ttp/threat-intel/get-feed:" (.header.Get "x-mc-date") (.header.Get "x-mc-req-id"))]] + request.url: https://eu-api.mimecast.com/api/ttp/threat-intel/get-feed + response.decode_as: application/json + response.pagination: + - set: + target: body.data + value: '[{"feedType": "malware_customer","fileType": "stix","compress": false,"token": "[[.last_response.header.Get "x-mc-threat-feed-next-token"]]","end": "[[formatDate (now) "2006-01-02T15:04:05+0700"]]", "start":"[[.cursor.next_date]]"}]' + value_type: json + response.split: + target: body.objects + transforms: + - set: + target: body.Content-Disposition + value: '[[.last_response.header.Get "Content-Disposition"]]' + tags: + - forwarded + - mimecast-threat-intel-feed-malware-customer + - condition: ${kubernetes.hints.mimecast.threat_intel_malware_grid.enabled} == true or ${kubernetes.hints.mimecast.enabled} == true + config_version: "2" + cursor: + next_date: + value: '[[.first_event.created]]' + data_stream: + dataset: mimecast.threat_intel_malware_grid + type: logs + interval: 5m + request.method: POST + request.transforms: + - set: + default: '[{"feedType": "malware_grid","fileType": "stix","compress": false,"end": "[[formatDate (now) "2006-01-02T15:04:05+0700"]]", "start":"[[formatDate (now (parseDuration "-5m")) "2006-01-02T15:04:05+0700"]]"}]' + target: body.data + value: '[{"feedType": "malware_grid","fileType": "stix","compress": false,"end": "[[formatDate (now) "2006-01-02T15:04:05+0700"]]", "start":"[[formatDate (.cursor.next_date) 
"2006-01-02T15:04:05+0700"]]"}]' + value_type: json + - set: + target: header.x-mc-app-id + value: null + - set: + target: header.x-mc-date + value: '[[formatDate (now) "RFC1123"]]' + - set: + target: header.x-mc-req-id + value: '[[uuid]]' + - set: + fail_on_template_error: true + target: header.Authorization + value: MC :[[hmacBase64 "sha1" (base64Decode "") (sprintf "%s:%s:/api/ttp/threat-intel/get-feed:" (.header.Get "x-mc-date") (.header.Get "x-mc-req-id"))]] + request.url: https://eu-api.mimecast.com/api/ttp/threat-intel/get-feed + response.decode_as: application/json + response.pagination: + - set: + target: body.data + value: '[{"feedType": "malware_grid","fileType": "stix","compress": false,"token": "[[.last_response.header.Get "x-mc-threat-feed-next-token"]]","end": "[[formatDate (now) "2006-01-02T15:04:05+0700"]]", "start":"[[.cursor.next_date]]"}]' + value_type: json + response.split: + target: body.objects + transforms: + - set: + target: body.Content-Disposition + value: '[[.last_response.header.Get "Content-Disposition"]]' + tags: + - forwarded + - mimecast-threat-intel-feed-malware-grid + - condition: ${kubernetes.hints.mimecast.ttp_ap_logs.enabled} == true or ${kubernetes.hints.mimecast.enabled} == true + config_version: "2" + cursor: null + data_stream: + dataset: mimecast.ttp_ap_logs + type: logs + interval: 5m + next_date: + value: '[[.first_event.date]]' + request.method: POST + request.transforms: + - set: + default: '[{"oldestFirst": false, "route": "all", "result":"all","to": "[[formatDate (now) "2006-01-02T15:04:05+0700"]]", "from":"[[formatDate (now (parseDuration "-5m")) "2006-01-02T15:04:05+0700"]]"}]' + target: body.data + value: '[{"oldestFirst": false, "route": "all", "result":"all","to": "[[formatDate (now) "2006-01-02T15:04:05+0700"]]", "from":"[[.cursor.next_date]]"}]' + value_type: json + - set: + target: header.x-mc-app-id + value: null + - set: + target: header.x-mc-date + value: '[[formatDate (now) "RFC1123"]]' + - set: + target: header.x-mc-req-id + value: '[[uuid]]' + - set: + fail_on_template_error: true + target: header.Authorization + value: MC :[[hmacBase64 "sha1" (base64Decode "") (sprintf "%s:%s:/api/ttp/attachment/get-logs:" (.header.Get "x-mc-date") (.header.Get "x-mc-req-id"))]] + request.url: https://eu-api.mimecast.com/api/ttp/attachment/get-logs + response.decode_as: application/json + response.pagination: + - set: + fail_on_template_error: true + target: body.meta.pagination.pageToken + value: '[[.last_response.body.meta.pagination.next]]' + response.split: + split: + target: body.attachmentLogs + target: body.data + tags: + - forwarded + - mimecast-ttp-ap + - condition: ${kubernetes.hints.mimecast.ttp_ip_logs.enabled} == true or ${kubernetes.hints.mimecast.enabled} == true + config_version: "2" + cursor: null + data_stream: + dataset: mimecast.ttp_ip_logs + type: logs + interval: 5m + next_date: + value: '[[.first_event.eventTime]]' + request.method: POST + request.transforms: + - set: + default: '[{"oldestFirst": false,"to": "[[formatDate (now) "2006-01-02T15:04:05+0700"]]", "from":"[[formatDate (now (parseDuration "-5m")) "2006-01-02T15:04:05+0700"]]"}]' + target: body.data + value: '[{"oldestFirst": false,"to": "[[formatDate (now) "2006-01-02T15:04:05+0700"]]", "from":"[[.cursor.next_date]]"}]' + value_type: json + - set: + target: header.x-mc-app-id + value: null + - set: + target: header.x-mc-date + value: '[[formatDate (now) "RFC1123"]]' + - set: + target: header.x-mc-req-id + value: '[[uuid]]' + - set: + fail_on_template_error: 
true + target: header.Authorization + value: MC :[[hmacBase64 "sha1" (base64Decode "") (sprintf "%s:%s:/api/ttp/impersonation/get-logs:" (.header.Get "x-mc-date") (.header.Get "x-mc-req-id"))]] + request.url: https://eu-api.mimecast.com/api/ttp/impersonation/get-logs + response.decode_as: application/json + response.pagination: + - set: + fail_on_template_error: true + target: body.meta.pagination.pageToken + value: '[[.last_response.body.meta.pagination.next]]' + response.split: + split: + target: body.impersonationLogs + target: body.data + tags: + - forwarded + - mimecast-ttp-ip + - condition: ${kubernetes.hints.mimecast.ttp_url_logs.enabled} == true or ${kubernetes.hints.mimecast.enabled} == true + config_version: "2" + cursor: + next_date: + value: '[[.first_event.date]]' + data_stream: + dataset: mimecast.ttp_url_logs + type: logs + interval: 5m + request.method: POST + request.transforms: + - set: + default: '[{"oldestFirst": false,"scanResult": "all","route":"all","to": "[[formatDate (now) "2006-01-02T15:04:05+0700"]]", "from":"[[formatDate (now (parseDuration "-5m")) "2006-01-02T15:04:05+0700"]]"}]' + target: body.data + value: '[{"oldestFirst": false,"scanResult": "all","route":"all","to": "[[formatDate (now) "2006-01-02T15:04:05+0700"]]", "from":"[[.cursor.next_date]]"}]' + value_type: json + - set: + target: header.x-mc-app-id + value: null + - set: + target: header.x-mc-date + value: '[[formatDate (now) "RFC1123"]]' + - set: + target: header.x-mc-req-id + value: '[[uuid]]' + - set: + fail_on_template_error: true + target: header.Authorization + value: MC :[[hmacBase64 "sha1" (base64Decode "") (sprintf "%s:%s:/api/ttp/url/get-logs:" (.header.Get "x-mc-date") (.header.Get "x-mc-req-id"))]] + request.url: https://eu-api.mimecast.com/api/ttp/url/get-logs + response.decode_as: application/json + response.pagination: + - set: + fail_on_template_error: true + target: body.meta.pagination.pageToken + value: '[[.last_response.body.meta.pagination.next]]' + response.split: + split: + target: body.clickLogs + target: body.data + tags: + - forwarded + - mimecast-ttp-url + data_stream.namespace: default + - name: filestream-mimecast + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.mimecast.container_logs.enabled} == true + data_stream: + dataset: kubernetes.container_logs + type: logs + exclude_files: [] + exclude_lines: [] + parsers: + - container: + format: auto + stream: all + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + prospector: + scanner: + symlinks: true + tags: [] + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/modsecurity.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/modsecurity.yml new file mode 100644 index 00000000000..cc9e109d5ed --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/modsecurity.yml @@ -0,0 +1,28 @@ +inputs: + - name: filestream-modsecurity + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.modsecurity.auditlog.enabled} == true or ${kubernetes.hints.modsecurity.enabled} == true + data_stream: + dataset: modsecurity.auditlog + type: logs + exclude_files: + - .gz$ + fields: + tz_offset: null + fields_under_root: true + parsers: + - container: + format: auto + stream: ${kubernetes.hints.modsecurity.auditlog.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + processors: + - add_locale: null + prospector: + scanner: + symlinks: true + tags: + - 
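The incremental-fetch pattern behind these httpjson streams: the set transform's default supplies a five-minute lookback window for the first run, and value takes over once a cursor value has been stored. Streams that declare cursor: null with a sibling next_date block (dlp_logs, ttp_ap_logs, ttp_ip_logs above) appear to never persist a cursor and so always fall back to the default window; a sketch of the nesting that does persist it, drawn from the working streams above:

cursor:
  next_date:
    value: '[[.first_event.eventTime]]'   # saved after each successful run
request.transforms:
  - set:
      target: body.data
      # first run, no cursor yet: 5m lookback window
      default: '[{"from":"[[formatDate (now (parseDuration "-5m")) "2006-01-02T15:04:05+0700"]]"}]'
      # later runs: resume from the stored cursor
      value: '[{"from":"[[.cursor.next_date]]"}]'
      value_type: json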
modsec-audit + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/mongodb.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/mongodb.yml new file mode 100644 index 00000000000..bf47b9628da --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/mongodb.yml @@ -0,0 +1,73 @@ +inputs: + - name: mongodb/metrics-mongodb + type: mongodb/metrics + use_output: default + streams: + - condition: ${kubernetes.hints.mongodb.collstats.enabled} == true or ${kubernetes.hints.mongodb.enabled} == true + data_stream: + dataset: mongodb.collstats + type: metrics + hosts: + - ${kubernetes.hints.mongodb.collstats.host|'localhost:27017'} + metricsets: + - collstats + period: ${kubernetes.hints.mongodb.collstats.period|'10s'} + - condition: ${kubernetes.hints.mongodb.dbstats.enabled} == true or ${kubernetes.hints.mongodb.enabled} == true + data_stream: + dataset: mongodb.dbstats + type: metrics + hosts: + - ${kubernetes.hints.mongodb.dbstats.host|'localhost:27017'} + metricsets: + - dbstats + period: ${kubernetes.hints.mongodb.dbstats.period|'10s'} + - condition: ${kubernetes.hints.mongodb.metrics.enabled} == true or ${kubernetes.hints.mongodb.enabled} == true + data_stream: + dataset: mongodb.metrics + type: metrics + hosts: + - ${kubernetes.hints.mongodb.metrics.host|'localhost:27017'} + metricsets: + - metrics + period: ${kubernetes.hints.mongodb.metrics.period|'10s'} + - condition: ${kubernetes.hints.mongodb.replstatus.enabled} == true or ${kubernetes.hints.mongodb.enabled} == true + data_stream: + dataset: mongodb.replstatus + type: metrics + hosts: + - ${kubernetes.hints.mongodb.replstatus.host|'localhost:27017'} + metricsets: + - replstatus + period: ${kubernetes.hints.mongodb.replstatus.period|'10s'} + - condition: ${kubernetes.hints.mongodb.status.enabled} == true or ${kubernetes.hints.mongodb.enabled} == true + data_stream: + dataset: mongodb.status + type: metrics + hosts: + - ${kubernetes.hints.mongodb.status.host|'localhost:27017'} + metricsets: + - status + period: ${kubernetes.hints.mongodb.status.period|'10s'} + data_stream.namespace: default + - name: filestream-mongodb + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.mongodb.log.enabled} == true or ${kubernetes.hints.mongodb.enabled} == true + data_stream: + dataset: mongodb.log + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.mongodb.log.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + prospector: + scanner: + symlinks: true + tags: + - mongodb-logs + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/mysql.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/mysql.yml new file mode 100644 index 00000000000..234caeeb40c --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/mysql.yml @@ -0,0 +1,82 @@ +inputs: + - name: filestream-mysql + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.mysql.error.enabled} == true or ${kubernetes.hints.mysql.enabled} == true + data_stream: + dataset: mysql.error + type: logs + exclude_files: + - .gz$ + multiline: + match: after + negate: true + pattern: ^([0-9]{4}-[0-9]{2}-[0-9]{2}|[0-9]{6}) + parsers: + - container: + format: auto + stream: ${kubernetes.hints.mysql.error.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + processors: + - add_locale: null + 
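The mongodb/metrics streams above default to a bare host:port; where authentication is required, a full MongoDB connection string is the usual alternative. A hedged sketch, credentials illustrative:

hosts:
  - ${kubernetes.hints.mongodb.status.host|'localhost:27017'}   # bare host:port, as defaulted above
  - mongodb://monitor:CHANGEME@mongo.internal:27017             # URI form carrying credentials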
prospector: + scanner: + symlinks: true + - condition: ${kubernetes.hints.mysql.slowlog.enabled} == true or ${kubernetes.hints.mysql.enabled} == true + data_stream: + dataset: mysql.slowlog + type: logs + exclude_files: + - .gz$ + exclude_lines: + - '^[\/\w\.]+, Version: .* started with:.*' + - ^# Time:.* + multiline: + match: after + negate: true + pattern: '^(# User@Host: |# Time: )' + parsers: + - container: + format: auto + stream: ${kubernetes.hints.mysql.slowlog.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + prospector: + scanner: + symlinks: true + data_stream.namespace: default + - name: mysql/metrics-mysql + type: mysql/metrics + use_output: default + streams: + - condition: ${kubernetes.hints.mysql.galera_status.enabled} == true and ${kubernetes.hints.mysql.enabled} == true + data_stream: + dataset: mysql.galera_status + type: metrics + hosts: + - ${kubernetes.hints.mysql.galera_status.host|'tcp(127.0.0.1:3306)/'} + metricsets: + - galera_status + password: ${kubernetes.hints.mysql.galera_status.password|'test'} + period: ${kubernetes.hints.mysql.galera_status.period|'10s'} + username: ${kubernetes.hints.mysql.galera_status.username|'root'} + - condition: ${kubernetes.hints.mysql.performance.enabled} == true and ${kubernetes.hints.mysql.enabled} == true + data_stream: + dataset: mysql.performance + type: metrics + metricsets: + - performance + - condition: ${kubernetes.hints.mysql.status.enabled} == true or ${kubernetes.hints.mysql.enabled} == true + data_stream: + dataset: mysql.status + type: metrics + hosts: + - ${kubernetes.hints.mysql.status.host|'tcp(127.0.0.1:3306)/'} + metricsets: + - status + password: ${kubernetes.hints.mysql.status.password|'test'} + period: ${kubernetes.hints.mysql.status.period|'10s'} + username: ${kubernetes.hints.mysql.status.username|'root'} + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/mysql_enterprise.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/mysql_enterprise.yml new file mode 100644 index 00000000000..d943bb661ff --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/mysql_enterprise.yml @@ -0,0 +1,18 @@ +inputs: + - name: filestream-mysql_enterprise + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.mysql_enterprise.audit.enabled} == true or ${kubernetes.hints.mysql_enterprise.enabled} == true + data_stream: + dataset: mysql_enterprise.audit + type: logs + exclude_files: + - .gz$ + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + processors: + - add_locale: null + tags: + - mysql_enterprise-audit + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/nats.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/nats.yml new file mode 100644 index 00000000000..91525210374 --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/nats.yml @@ -0,0 +1,82 @@ +inputs: + - name: filestream-nats + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.nats.log.enabled} == true or ${kubernetes.hints.nats.enabled} == true + data_stream: + dataset: nats.log + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.nats.log.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + prospector: + scanner: + symlinks: true + tags: + - nats-log + data_stream.namespace: default + - name: 
nats/metrics-nats + type: nats/metrics + use_output: default + streams: + - condition: ${kubernetes.hints.nats.connection.enabled} == true and ${kubernetes.hints.nats.enabled} == true + data_stream: + dataset: nats.connection + type: metrics + hosts: + - ${kubernetes.hints.nats.connection.host|'localhost:8222'} + metricsets: + - connection + period: ${kubernetes.hints.nats.connection.period|'10s'} + - condition: ${kubernetes.hints.nats.connections.enabled} == true or ${kubernetes.hints.nats.enabled} == true + data_stream: + dataset: nats.connections + type: metrics + hosts: + - ${kubernetes.hints.nats.connections.host|'localhost:8222'} + metricsets: + - connections + period: ${kubernetes.hints.nats.connections.period|'10s'} + - condition: ${kubernetes.hints.nats.route.enabled} == true and ${kubernetes.hints.nats.enabled} == true + data_stream: + dataset: nats.route + type: metrics + hosts: + - ${kubernetes.hints.nats.route.host|'localhost:8222'} + metricsets: + - route + period: ${kubernetes.hints.nats.route.period|'10s'} + - condition: ${kubernetes.hints.nats.routes.enabled} == true or ${kubernetes.hints.nats.enabled} == true + data_stream: + dataset: nats.routes + type: metrics + hosts: + - ${kubernetes.hints.nats.routes.host|'localhost:8222'} + metricsets: + - routes + period: ${kubernetes.hints.nats.routes.period|'10s'} + - condition: ${kubernetes.hints.nats.stats.enabled} == true or ${kubernetes.hints.nats.enabled} == true + data_stream: + dataset: nats.stats + type: metrics + hosts: + - ${kubernetes.hints.nats.stats.host|'localhost:8222'} + metricsets: + - stats + period: ${kubernetes.hints.nats.stats.period|'10s'} + - condition: ${kubernetes.hints.nats.subscriptions.enabled} == true or ${kubernetes.hints.nats.enabled} == true + data_stream: + dataset: nats.subscriptions + type: metrics + hosts: + - ${kubernetes.hints.nats.subscriptions.host|'localhost:8222'} + metricsets: + - subscriptions + period: ${kubernetes.hints.nats.subscriptions.period|'10s'} + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/netflow.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/netflow.yml new file mode 100644 index 00000000000..d2bb80601df --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/netflow.yml @@ -0,0 +1,47 @@ +inputs: + - name: netflow-netflow + type: netflow + use_output: default + streams: + - condition: ${kubernetes.hints.netflow.log.enabled} == true or ${kubernetes.hints.netflow.enabled} == true + data_stream: + dataset: netflow.log + type: logs + detect_sequence_reset: true + expiration_timeout: 30m + host: localhost:2055 + max_message_size: 10KiB + protocols: + - v1 + - v5 + - v6 + - v7 + - v8 + - v9 + - ipfix + queue_size: 8192 + tags: + - netflow + - forwarded + data_stream.namespace: default + - name: filestream-netflow + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.netflow.container_logs.enabled} == true + data_stream: + dataset: kubernetes.container_logs + type: logs + exclude_files: [] + exclude_lines: [] + parsers: + - container: + format: auto + stream: all + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + prospector: + scanner: + symlinks: true + tags: [] + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/nginx.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/nginx.yml new file mode 100644 index 00000000000..f0c166bbfbb --- /dev/null +++ 
b/deploy/kubernetes/elastic-agent-standalone/templates.d/nginx.yml @@ -0,0 +1,142 @@ +inputs: + - name: filestream-nginx + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.nginx.access.enabled} == true or ${kubernetes.hints.nginx.enabled} == true + data_stream: + dataset: nginx.access + type: logs + exclude_files: + - .gz$ + ignore_older: 72h + parsers: + - container: + format: auto + stream: ${kubernetes.hints.nginx.access.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + processors: + - add_locale: null + prospector: + scanner: + symlinks: true + tags: + - nginx-access + - condition: ${kubernetes.hints.nginx.error.enabled} == true or ${kubernetes.hints.nginx.enabled} == true + data_stream: + dataset: nginx.error + type: logs + exclude_files: + - .gz$ + ignore_older: 72h + multiline: + match: after + negate: true + pattern: '^\d{4}\/\d{2}\/\d{2} ' + parsers: + - container: + format: auto + stream: ${kubernetes.hints.nginx.error.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + processors: + - add_locale: null + prospector: + scanner: + symlinks: true + tags: + - nginx-error + data_stream.namespace: default + - name: httpjson-nginx + type: httpjson + use_output: default + streams: + - condition: ${kubernetes.hints.nginx.access.enabled} == true and ${kubernetes.hints.nginx.enabled} == true + config_version: 2 + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: nginx.access + type: logs + interval: 10s + request.method: POST + request.transforms: + - set: + target: url.params.search + value: search sourcetype=nginx:plus:access | streamstats max(_indextime) AS max_indextime + - set: + target: url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: https://server.example.com:8089/services/search/jobs/export + response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - nginx-access + - condition: ${kubernetes.hints.nginx.error.enabled} == true and ${kubernetes.hints.nginx.enabled} == true + config_version: 2 + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: nginx.error + type: logs + interval: 10s + request.method: POST + request.transforms: + - set: + target: url.params.search + value: search sourcetype=nginx:plus:error | streamstats max(_indextime) AS max_indextime + - set: + target: url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: https://server.example.com:8089/services/search/jobs/export + response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - nginx-error + data_stream.namespace: default + - name: nginx/metrics-nginx + type: nginx/metrics + use_output: default + streams: + - condition: ${kubernetes.hints.nginx.stubstatus.enabled} == true or 
${kubernetes.hints.nginx.enabled} == true + data_stream: + dataset: nginx.stubstatus + type: metrics + hosts: + - ${kubernetes.hints.nginx.stubstatus.host|'http://127.0.0.1:80'} + metricsets: + - stubstatus + period: ${kubernetes.hints.nginx.stubstatus.period|'10s'} + server_status_path: /nginx_status + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/nginx_ingress_controller.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/nginx_ingress_controller.yml new file mode 100644 index 00000000000..5f9ba9bc7e4 --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/nginx_ingress_controller.yml @@ -0,0 +1,53 @@ +inputs: + - name: filestream-nginx_ingress_controller + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.nginx_ingress_controller.access.enabled} == true or ${kubernetes.hints.nginx_ingress_controller.enabled} == true + data_stream: + dataset: nginx_ingress_controller.access + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.nginx_ingress_controller.access.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + processors: + - add_locale: null + prospector: + scanner: + symlinks: true + tags: + - nginx-ingress-controller-access + - condition: ${kubernetes.hints.nginx_ingress_controller.error.enabled} == true or ${kubernetes.hints.nginx_ingress_controller.enabled} == true + data_stream: + dataset: nginx_ingress_controller.error + type: logs + exclude_files: + - .gz$ + multiline: + match: after + negate: true + pattern: '^[A-Z]{1}[0-9]{4} ' + parsers: + - container: + format: auto + stream: ${kubernetes.hints.nginx_ingress_controller.error.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + processors: + - drop_event: + when: + not: + regexp: + message: '^[A-Z]{1}[0-9]{4} ' + - add_locale: null + prospector: + scanner: + symlinks: true + tags: + - nginx-ingress-controller-error + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/oracle.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/oracle.yml new file mode 100644 index 00000000000..8e846586d4b --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/oracle.yml @@ -0,0 +1,82 @@ +inputs: + - name: filestream-oracle + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.oracle.database_audit.enabled} == true or ${kubernetes.hints.oracle.enabled} == true + data_stream: + dataset: oracle.database_audit + type: logs + exclude_files: + - .gz$ + exclude_lines: + - ^Audit file + parsers: + - multiline: + match: after + negate: true + pattern: ^[A-Za-z]{3}\s+[A-Za-z]{3}\s+[0-9]{1,2}\s[0-9]{2}:[0-9]{2}:[0-9]{2}\s[0-9]{4}\s\S[0-9]{2}:[0-9]{2} + timeout: 10 + type: pattern + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + processors: + - add_locale: null + tags: + - oracle-database_audit + data_stream.namespace: default + - name: sql/metrics-oracle + type: sql/metrics + use_output: default + streams: + - condition: ${kubernetes.hints.oracle.performance.enabled} == true and ${kubernetes.hints.oracle.enabled} == true + data_stream: + dataset: oracle.performance + type: metrics + driver: oracle + hosts: + - ${kubernetes.hints.oracle.performance.host|'oracle://sys:Oradoc_db1@0.0.0.0:1521/ORCLCDB.localdomain?sysdba=1'} + metricsets: + - query + period: 
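The default hosts entry above encodes the full Oracle DSN these sql/metrics streams expect: oracle://<user>:<password>@<host>:<port>/<service_name>, with ?sysdba=1 requesting a SYSDBA session. A sketch with the documented placeholder credentials swapped for illustrative ones:

hosts:
  - oracle://monitor_user:CHANGEME@db.internal:1521/ORCLPDB1   # illustrative; append ?sysdba=1 only when connecting as SYS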
${kubernetes.hints.oracle.performance.period|'60s'} + raw_data.enabled: true + sql_queries: + - query: SELECT name, physical_reads, db_block_gets, consistent_gets, 1 - (physical_reads / (db_block_gets + consistent_gets)) "Hit_Ratio" FROM V$BUFFER_POOL_STATISTICS + response_format: table + - query: SELECT sum(a.value) total_cur, avg(a.value) avg_cur, max(a.value) max_cur, S.username, s.machine FROM v$sesstat a, v$statname b, v$session s WHERE a.statistic# = b.statistic# AND s.sid = a.sid GROUP BY s.username, s.machine + response_format: table + - query: SELECT total_cursors, current_cursors, sess_cur_cache_hits, parse_count_total, sess_cur_cache_hits / total_cursors as cachehits_totalcursors_ratio , sess_cur_cache_hits - parse_count_total as real_parses FROM ( SELECT sum ( decode ( name, 'opened cursors cumulative', value, 0)) total_cursors, sum ( decode ( name, 'opened cursors current',value,0)) current_cursors, sum ( decode ( name, 'session cursor cache hits',value,0)) sess_cur_cache_hits, sum ( decode ( name, 'parse count (total)',value,0)) parse_count_total FROM v$sysstat WHERE name IN ( 'opened cursors cumulative','opened cursors current','session cursor cache hits', 'parse count (total)' )) + response_format: table + - query: SELECT 'lock_requests' "Ratio" , AVG(gethitratio) FROM V$LIBRARYCACHE UNION SELECT 'pin_requests' "Ratio", AVG(pinhitratio) FROM V$LIBRARYCACHE UNION SELECT 'io_reloads' "Ratio", (SUM(reloads) / SUM(pins)) FROM V$LIBRARYCACHE + response_format: variables + - condition: ${kubernetes.hints.oracle.sysmetric.enabled} == true and ${kubernetes.hints.oracle.enabled} == true + data_stream: + dataset: oracle.sysmetric + type: metrics + driver: oracle + dynamic_metric_name_filter: '%' + hosts: + - ${kubernetes.hints.oracle.sysmetric.host|'oracle://sys:Oradoc_db1@0.0.0.0:1521/ORCLCDB.localdomain?sysdba=1'} + metricsets: + - query + period: ${kubernetes.hints.oracle.sysmetric.period|'60s'} + raw_data.enabled: true + sql_queries: + - query: SELECT METRIC_NAME, VALUE FROM V$SYSMETRIC WHERE GROUP_ID = 2 and METRIC_NAME LIKE '%' + response_format: variables + - condition: ${kubernetes.hints.oracle.tablespace.enabled} == true and ${kubernetes.hints.oracle.enabled} == true + data_stream: + dataset: oracle.tablespace + type: metrics + driver: oracle + dynamic_metric_name_filter: "" + hosts: + - ${kubernetes.hints.oracle.tablespace.host|'oracle://sys:Oradoc_db1@0.0.0.0:1521/ORCLCDB.localdomain?sysdba=1'} + metricsets: + - query + period: ${kubernetes.hints.oracle.tablespace.period|'60s'} + raw_data.enabled: true + sql_queries: + - query: WITH data_files AS (SELECT file_name, file_id, tablespace_name, bytes, status, maxbytes, user_bytes, online_status FROM sys.dba_data_files UNION SELECT file_name, file_id, tablespace_name, bytes, status, maxbytes, user_bytes, status AS ONLINE_STATUS FROM sys.dba_temp_files), spaces AS (SELECT b.tablespace_name TB_NAME, tbs_size TB_SIZE_USED, a.free_space TB_SIZE_FREE FROM (SELECT tablespace_name, SUM(bytes) AS free_space FROM dba_free_space GROUP BY tablespace_name) a, (SELECT tablespace_name, SUM(bytes) AS tbs_size FROM dba_data_files GROUP BY tablespace_name) b WHERE a.tablespace_name(+) = b.tablespace_name AND a.tablespace_name != 'TEMP'), temp_spaces AS (SELECT tablespace_name, tablespace_size, allocated_space, free_space FROM dba_temp_free_space WHERE tablespace_name = 'TEMP'), details AS (SELECT df.file_name, df.file_id, df.tablespace_name, df.bytes, df.status, df.maxbytes, df.user_bytes, df.online_status, sp.tb_size_used, sp.tb_size_free FROM 
data_files df, spaces sp WHERE df.tablespace_name = sp.tb_name UNION SELECT df.file_name, df.file_id, df.tablespace_name, df.bytes, df.status, df.maxbytes, df.user_bytes, df.online_status, tsp.tablespace_size - tsp.free_space AS TB_SIZE_USED, tsp.free_space AS TB_SIZE_FREE FROM data_files df, temp_spaces tsp WHERE df.tablespace_name = tsp.tablespace_name) SELECT file_name, file_id, tablespace_name, bytes, status, maxbytes, user_bytes, online_status, tb_size_used, tb_size_free, SUM(bytes) over() AS TOTAL_BYTES FROM details + response_format: table + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/osquery.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/osquery.yml new file mode 100644 index 00000000000..6ebd2f12c46 --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/osquery.yml @@ -0,0 +1,23 @@ +inputs: + - name: filestream-osquery + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.osquery.result.enabled} == true or ${kubernetes.hints.osquery.enabled} == true + data_stream: + dataset: osquery.result + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.osquery.result.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + prospector: + scanner: + symlinks: true + tags: + - osquery + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/osquery_manager.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/osquery_manager.yml new file mode 100644 index 00000000000..6620de9c7de --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/osquery_manager.yml @@ -0,0 +1,33 @@ +inputs: + - name: osquery-osquery_manager + type: osquery + use_output: default + streams: + - condition: ${kubernetes.hints.osquery_manager.result.enabled} == true or ${kubernetes.hints.osquery_manager.enabled} == true + data_stream: + dataset: osquery_manager.result + type: logs + id: null + query: null + data_stream.namespace: default + - name: filestream-osquery_manager + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.osquery_manager.container_logs.enabled} == true + data_stream: + dataset: kubernetes.container_logs + type: logs + exclude_files: [] + exclude_lines: [] + parsers: + - container: + format: auto + stream: all + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + prospector: + scanner: + symlinks: true + tags: [] + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/panw.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/panw.yml new file mode 100644 index 00000000000..93c07883f03 --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/panw.yml @@ -0,0 +1,94 @@ +inputs: + - name: tcp-panw + type: tcp + use_output: default + streams: + - condition: ${kubernetes.hints.panw.panos.enabled} == true or ${kubernetes.hints.panw.enabled} == true + data_stream: + dataset: panw.panos + type: logs + host: localhost:9001 + max_message_size: 50KiB + processors: + - add_locale: null + - syslog: + field: message + format: auto + timezone: Local + - add_fields: + fields: + internal_zones: + - trust + target: _conf + - add_fields: + fields: + external_zones: + - untrust + target: _conf + tags: + - panw-panos + - forwarded + data_stream.namespace: default + - name: udp-panw + type: udp + use_output: default + streams: + - 
condition: ${kubernetes.hints.panw.panos.enabled} == true or ${kubernetes.hints.panw.enabled} == true + data_stream: + dataset: panw.panos + type: logs + host: localhost:9001 + max_message_size: 50KiB + processors: + - add_locale: null + - syslog: + field: message + format: auto + timezone: Local + - add_fields: + fields: + internal_zones: + - trust + target: _conf + - add_fields: + fields: + external_zones: + - untrust + target: _conf + tags: + - panw-panos + - forwarded + data_stream.namespace: default + - name: filestream-panw + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.panw.panos.enabled} == true or ${kubernetes.hints.panw.enabled} == true + data_stream: + dataset: panw.panos + type: logs + exclude_files: + - .gz$ + fields: + _conf: + external_zones: + - untrust + internal_zones: + - trust + tz_offset: local + fields_under_root: true + parsers: + - container: + format: auto + stream: ${kubernetes.hints.panw.panos.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + processors: + - add_locale: null + prospector: + scanner: + symlinks: true + tags: + - panw-panos + - forwarded + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/panw_cortex_xdr.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/panw_cortex_xdr.yml new file mode 100644 index 00000000000..ec6a58fd9b2 --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/panw_cortex_xdr.yml @@ -0,0 +1,90 @@ +inputs: + - name: httpjson-panw_cortex_xdr + type: httpjson + use_output: default + streams: + - condition: ${kubernetes.hints.panw_cortex_xdr.alerts.enabled} == true or ${kubernetes.hints.panw_cortex_xdr.enabled} == true + config_version: "2" + cursor: + next_ts: + value: '[[.last_event.detection_timestamp]]' + data_stream: + dataset: panw_cortex_xdr.alerts + type: logs + interval: 5m + request.method: POST + request.rate_limit: + limit: '[[.last_response.header.Get "X-Rate-Limit-Limit"]]' + remaining: '[[.last_response.header.Get "X-Rate-Limit-Remaining"]]' + reset: '[[(parseDate (.last_response.header.Get "X-Rate-Limit-Reset")).Unix]]' + request.timeout: 30s + request.transforms: + - set: + target: header.Authorization + value: null + - set: + target: header.x-xdr-auth-id + value: 1 + - set: + target: body.request_data.sort.field + value: creation_time + - set: + target: body.request_data.sort.keyword + value: asc + - append: + default: |- + { + "field": "creation_time", + "operator": "gte", + "value": [[ mul (add (now (parseDuration "-24h")).Unix) 1000 ]] + } + target: body.request_data.filters + value: |- + { + "field": "creation_time", + "operator": "gte", + "value": [[ .cursor.next_ts ]] + } + value_type: json + request.url: https://test.xdr.eu.paloaltonetworks.com/public_api/v1/alerts/get_alerts_multi_events + response.pagination: + - set: + fail_on_template_error: true + target: body.request_data.search_from + value: '[[if (ne (len .last_response.body.reply.alerts) 0)]][[mul .last_response.page 100]][[end]]' + value_type: int + - set: + fail_on_template_error: true + target: body.request_data.search_to + value: '[[if (ne (len .last_response.body.reply.alerts) 0)]][[add (mul .last_response.page 100) 100]][[end]]' + value_type: int + response.split: + split: + keep_parent: true + target: body.events + target: body.reply.alerts + tags: + - forwarded + - panw_cortex_xdr + data_stream.namespace: default + - name: filestream-panw_cortex_xdr + type: filestream + use_output: default 
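For reference, the Cortex XDR alerts pagination above pages in fixed blocks of 100: search_from and search_to advance by last_response.page * 100 until a response carries zero alerts, at which point the templates render empty and the loop stops. The same scheme with a different block size, values illustrative only:

response.pagination:
  - set:
      fail_on_template_error: true
      target: body.request_data.search_from
      value: '[[if (ne (len .last_response.body.reply.alerts) 0)]][[mul .last_response.page 50]][[end]]'   # 50-row pages
      value_type: int
  - set:
      fail_on_template_error: true
      target: body.request_data.search_to
      value: '[[if (ne (len .last_response.body.reply.alerts) 0)]][[add (mul .last_response.page 50) 50]][[end]]'
      value_type: int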
+ streams: + - condition: ${kubernetes.hints.panw_cortex_xdr.container_logs.enabled} == true + data_stream: + dataset: kubernetes.container_logs + type: logs + exclude_files: [] + exclude_lines: [] + parsers: + - container: + format: auto + stream: all + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + prospector: + scanner: + symlinks: true + tags: [] + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/pfsense.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/pfsense.yml new file mode 100644 index 00000000000..e4541f90639 --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/pfsense.yml @@ -0,0 +1,62 @@ +inputs: + - name: udp-pfsense + type: udp + use_output: default + streams: + - condition: ${kubernetes.hints.pfsense.log.enabled} == true or ${kubernetes.hints.pfsense.enabled} == true + data_stream: + dataset: pfsense.log + type: logs + host: localhost:9001 + processors: + - add_locale: null + - add_fields: + fields: + internal_networks: + - private + tz_offset: local + target: _tmp + tags: + - pfsense + - forwarded + data_stream.namespace: default + - name: tcp-pfsense + type: tcp + use_output: default + streams: + - condition: ${kubernetes.hints.pfsense.log.enabled} == true and ${kubernetes.hints.pfsense.enabled} == true + data_stream: + dataset: pfsense.log + type: logs + host: localhost:9001 + processors: + - add_locale: null + - add_fields: + fields: + tz_offset: local + target: _tmp + tags: + - pfsense + - forwarded + data_stream.namespace: default + - name: filestream-pfsense + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.pfsense.container_logs.enabled} == true + data_stream: + dataset: kubernetes.container_logs + type: logs + exclude_files: [] + exclude_lines: [] + parsers: + - container: + format: auto + stream: all + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + prospector: + scanner: + symlinks: true + tags: [] + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/postgresql.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/postgresql.yml new file mode 100644 index 00000000000..a9abf518a9a --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/postgresql.yml @@ -0,0 +1,68 @@ +inputs: + - name: filestream-postgresql + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.postgresql.log.enabled} == true or ${kubernetes.hints.postgresql.enabled} == true + data_stream: + dataset: postgresql.log + type: logs + exclude_files: + - .gz$ + multiline: + match: after + negate: true + pattern: '^\d{4}-\d{2}-\d{2} ' + parsers: + - container: + format: auto + stream: ${kubernetes.hints.postgresql.log.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + prospector: + scanner: + symlinks: true + tags: + - postgresql-log + data_stream.namespace: default + - name: postgresql/metrics-postgresql + type: postgresql/metrics + use_output: default + streams: + - condition: ${kubernetes.hints.postgresql.activity.enabled} == true or ${kubernetes.hints.postgresql.enabled} == true + data_stream: + dataset: postgresql.activity + type: metrics + hosts: + - ${kubernetes.hints.postgresql.activity.host|'postgres://localhost:5432'} + metricsets: + - activity + period: ${kubernetes.hints.postgresql.activity.period|'10s'} + - condition: ${kubernetes.hints.postgresql.bgwriter.enabled} == true or 
${kubernetes.hints.postgresql.enabled} == true + data_stream: + dataset: postgresql.bgwriter + type: metrics + hosts: + - ${kubernetes.hints.postgresql.bgwriter.host|'postgres://localhost:5432'} + metricsets: + - bgwriter + period: ${kubernetes.hints.postgresql.bgwriter.period|'10s'} + - condition: ${kubernetes.hints.postgresql.database.enabled} == true or ${kubernetes.hints.postgresql.enabled} == true + data_stream: + dataset: postgresql.database + type: metrics + hosts: + - ${kubernetes.hints.postgresql.database.host|'postgres://localhost:5432'} + metricsets: + - database + period: ${kubernetes.hints.postgresql.database.period|'10s'} + - condition: ${kubernetes.hints.postgresql.statement.enabled} == true or ${kubernetes.hints.postgresql.enabled} == true + data_stream: + dataset: postgresql.statement + type: metrics + hosts: + - ${kubernetes.hints.postgresql.statement.host|'postgres://localhost:5432'} + metricsets: + - statement + period: ${kubernetes.hints.postgresql.statement.period|'10s'} + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/prometheus.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/prometheus.yml new file mode 100644 index 00000000000..2a7e630c9cf --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/prometheus.yml @@ -0,0 +1,90 @@ +inputs: + - name: prometheus/metrics-prometheus + type: prometheus/metrics + use_output: default + streams: + - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + condition: ${kubernetes.hints.prometheus.collector.enabled} == true or ${kubernetes.hints.prometheus.enabled} == true + data_stream: + dataset: prometheus.collector + type: metrics + hosts: + - ${kubernetes.hints.prometheus.collector.host|'localhost:9090'} + metrics_filters.exclude: null + metrics_filters.include: null + metrics_path: /metrics + metricsets: + - collector + password: ${kubernetes.hints.prometheus.collector.password|'secret'} + period: ${kubernetes.hints.prometheus.collector.period|'10s'} + rate_counters: true + ssl.certificate_authorities: + - /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt + use_types: true + username: ${kubernetes.hints.prometheus.collector.username|'user'} + - condition: ${kubernetes.hints.prometheus.query.enabled} == true and ${kubernetes.hints.prometheus.enabled} == true + data_stream: + dataset: prometheus.query + type: metrics + hosts: + - ${kubernetes.hints.prometheus.query.host|'localhost:9090'} + metricsets: + - query + period: ${kubernetes.hints.prometheus.query.period|'10s'} + queries: + - name: instant_vector + params: + query: sum(rate(prometheus_http_requests_total[1m])) + path: /api/v1/query + - name: range_vector + params: + end: "2019-12-21T00:00:00.000Z" + query: up + start: "2019-12-20T00:00:00.000Z" + step: 1h + path: /api/v1/query_range + - name: scalar + params: + query: "100" + path: /api/v1/query + - name: string + params: + query: some_value + path: /api/v1/query + - condition: ${kubernetes.hints.prometheus.remote_write.enabled} == true and ${kubernetes.hints.prometheus.enabled} == true + data_stream: + dataset: prometheus.remote_write + type: metrics + host: localhost + metricsets: + - remote_write + port: 9201 + rate_counters: true + ssl.certificate: /etc/pki/server/cert.pem + ssl.enabled: null + ssl.key: null + types_patterns.exclude: null + types_patterns.include: null + use_types: true + data_stream.namespace: default + - name: filestream-prometheus + type: filestream + use_output: default + streams: + 
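The prometheus.query stream above accepts arbitrary PromQL through /api/v1/query and /api/v1/query_range; a sketch of one more instant-vector entry for the queries list, expression illustrative:

- name: scrape_duration
  path: /api/v1/query
  params:
    query: avg(scrape_duration_seconds)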
- condition: ${kubernetes.hints.prometheus.container_logs.enabled} == true + data_stream: + dataset: kubernetes.container_logs + type: logs + exclude_files: [] + exclude_lines: [] + parsers: + - container: + format: auto + stream: all + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + prospector: + scanner: + symlinks: true + tags: [] + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/qnap_nas.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/qnap_nas.yml new file mode 100644 index 00000000000..a7358abd781 --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/qnap_nas.yml @@ -0,0 +1,60 @@ +inputs: + - name: filestream-qnap_nas + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.qnap_nas.container_logs.enabled} == true + data_stream: + dataset: kubernetes.container_logs + type: logs + exclude_files: [] + exclude_lines: [] + parsers: + - container: + format: auto + stream: all + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + prospector: + scanner: + symlinks: true + tags: [] + data_stream.namespace: default + - name: tcp-qnap_nas + type: tcp + use_output: default + streams: + - condition: ${kubernetes.hints.qnap_nas.log.enabled} == true or ${kubernetes.hints.qnap_nas.enabled} == true + data_stream: + dataset: qnap_nas.log + type: logs + host: localhost:9301 + processors: + - add_locale: null + - add_fields: + fields: + tz_offset: local + target: _tmp + tags: + - qnap-nas + - forwarded + data_stream.namespace: default + - name: udp-qnap_nas + type: udp + use_output: default + streams: + - condition: ${kubernetes.hints.qnap_nas.log.enabled} == true and ${kubernetes.hints.qnap_nas.enabled} == true + data_stream: + dataset: qnap_nas.log + type: logs + host: localhost:9301 + processors: + - add_locale: null + - add_fields: + fields: + tz_offset: local + target: _tmp + tags: + - qnap-nas + - forwarded + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/rabbitmq.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/rabbitmq.yml new file mode 100644 index 00000000000..942c4fa6911 --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/rabbitmq.yml @@ -0,0 +1,79 @@ +inputs: + - name: filestream-rabbitmq + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.rabbitmq.log.enabled} == true or ${kubernetes.hints.rabbitmq.enabled} == true + data_stream: + dataset: rabbitmq.log + type: logs + exclude_files: + - .gz$ + multiline: + match: after + negate: true + pattern: '[0-9]{4}-[0-9]{2}-[0-9]{2}' + parsers: + - container: + format: auto + stream: ${kubernetes.hints.rabbitmq.log.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + processors: + - add_locale: null + prospector: + scanner: + symlinks: true + tags: + - forwarded + data_stream.namespace: default + - name: rabbitmq/metrics-rabbitmq + type: rabbitmq/metrics + use_output: default + streams: + - condition: ${kubernetes.hints.rabbitmq.connection.enabled} == true or ${kubernetes.hints.rabbitmq.enabled} == true + data_stream: + dataset: rabbitmq.connection + type: metrics + hosts: + - ${kubernetes.hints.rabbitmq.connection.host|'localhost:15672'} + metricsets: + - connection + password: ${kubernetes.hints.rabbitmq.connection.password|''} + period: ${kubernetes.hints.rabbitmq.connection.period|'10s'} + username: 
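The rabbitmq/metrics streams above read the management API on localhost:15672 with empty default credentials, presumably to be supplied per pod through the same hints convention; the annotation keys below are an assumption, not confirmed by this patch:

metadata:
  annotations:
    co.elastic.hints/package: rabbitmq
    co.elastic.hints/connection.username: monitoring   # would fill ...connection.username|''
    co.elastic.hints/connection.password: CHANGEME     # illustrative only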
${kubernetes.hints.rabbitmq.connection.username|''} + - condition: ${kubernetes.hints.rabbitmq.exchange.enabled} == true or ${kubernetes.hints.rabbitmq.enabled} == true + data_stream: + dataset: rabbitmq.exchange + type: metrics + hosts: + - ${kubernetes.hints.rabbitmq.exchange.host|'localhost:15672'} + metricsets: + - exchange + password: ${kubernetes.hints.rabbitmq.exchange.password|''} + period: ${kubernetes.hints.rabbitmq.exchange.period|'10s'} + username: ${kubernetes.hints.rabbitmq.exchange.username|''} + - condition: ${kubernetes.hints.rabbitmq.node.enabled} == true or ${kubernetes.hints.rabbitmq.enabled} == true + data_stream: + dataset: rabbitmq.node + type: metrics + hosts: + - ${kubernetes.hints.rabbitmq.node.host|'localhost:15672'} + metricsets: + - node + node.collect: node + password: ${kubernetes.hints.rabbitmq.node.password|''} + period: ${kubernetes.hints.rabbitmq.node.period|'10s'} + username: ${kubernetes.hints.rabbitmq.node.username|''} + - condition: ${kubernetes.hints.rabbitmq.queue.enabled} == true or ${kubernetes.hints.rabbitmq.enabled} == true + data_stream: + dataset: rabbitmq.queue + type: metrics + hosts: + - ${kubernetes.hints.rabbitmq.queue.host|'localhost:15672'} + metricsets: + - queue + password: ${kubernetes.hints.rabbitmq.queue.password|''} + period: ${kubernetes.hints.rabbitmq.queue.period|'10s'} + username: ${kubernetes.hints.rabbitmq.queue.username|''} + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/redis.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/redis.yml new file mode 100644 index 00000000000..31731f6c1a5 --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/redis.yml @@ -0,0 +1,84 @@ +inputs: + - name: filestream-redis + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.redis.log.enabled} == true or ${kubernetes.hints.redis.enabled} == true + data_stream: + dataset: redis.log + type: logs + exclude_files: + - .gz$ + exclude_lines: + - ^\s+[\-`('.|_] + parsers: + - container: + format: auto + stream: ${kubernetes.hints.redis.log.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + prospector: + scanner: + symlinks: true + tags: + - redis-log + data_stream.namespace: default + - name: redis-redis + type: redis + use_output: default + streams: + - condition: ${kubernetes.hints.redis.slowlog.enabled} == true or ${kubernetes.hints.redis.enabled} == true + data_stream: + dataset: redis.slowlog + type: logs + hosts: + - ${kubernetes.hints.redis.slowlog.host|'127.0.0.1:6379'} + password: ${kubernetes.hints.redis.slowlog.password|''} + data_stream.namespace: default + - name: redis/metrics-redis + type: redis/metrics + use_output: default + streams: + - condition: ${kubernetes.hints.redis.info.enabled} == true or ${kubernetes.hints.redis.enabled} == true + data_stream: + dataset: redis.info + type: metrics + hosts: + - ${kubernetes.hints.redis.info.host|'127.0.0.1:6379'} + idle_timeout: 20s + maxconn: 10 + metricsets: + - info + network: tcp + password: ${kubernetes.hints.redis.info.password|''} + period: ${kubernetes.hints.redis.info.period|'10s'} + - condition: ${kubernetes.hints.redis.key.enabled} == true or ${kubernetes.hints.redis.enabled} == true + data_stream: + dataset: redis.key + type: metrics + hosts: + - ${kubernetes.hints.redis.key.host|'127.0.0.1:6379'} + idle_timeout: 20s + key.patterns: + - limit: 20 + pattern: '*' + maxconn: 10 + metricsets: + - key + network: tcp + password: 
${kubernetes.hints.redis.key.password|''} + period: ${kubernetes.hints.redis.key.period|'10s'} + - condition: ${kubernetes.hints.redis.keyspace.enabled} == true or ${kubernetes.hints.redis.enabled} == true + data_stream: + dataset: redis.keyspace + type: metrics + hosts: + - ${kubernetes.hints.redis.keyspace.host|'127.0.0.1:6379'} + idle_timeout: 20s + maxconn: 10 + metricsets: + - keyspace + network: tcp + password: ${kubernetes.hints.redis.keyspace.password|''} + period: ${kubernetes.hints.redis.keyspace.period|'10s'} + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/santa.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/santa.yml new file mode 100644 index 00000000000..d60bfeb744a --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/santa.yml @@ -0,0 +1,23 @@ +inputs: + - name: filestream-santa + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.santa.log.enabled} == true or ${kubernetes.hints.santa.enabled} == true + data_stream: + dataset: santa.log + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.santa.log.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + prospector: + scanner: + symlinks: true + tags: + - santa-log + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/security_detection_engine.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/security_detection_engine.yml new file mode 100644 index 00000000000..990a4372e8b --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/security_detection_engine.yml @@ -0,0 +1,22 @@ +inputs: + - name: filestream-security_detection_engine + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.security_detection_engine.container_logs.enabled} == true + data_stream: + dataset: kubernetes.container_logs + type: logs + exclude_files: [] + exclude_lines: [] + parsers: + - container: + format: auto + stream: all + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + prospector: + scanner: + symlinks: true + tags: [] + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/sentinel_one.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/sentinel_one.yml new file mode 100644 index 00000000000..dcd117dc994 --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/sentinel_one.yml @@ -0,0 +1,217 @@ +inputs: + - name: filestream-sentinel_one + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.sentinel_one.container_logs.enabled} == true + data_stream: + dataset: kubernetes.container_logs + type: logs + exclude_files: [] + exclude_lines: [] + parsers: + - container: + format: auto + stream: all + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + prospector: + scanner: + symlinks: true + tags: [] + data_stream.namespace: default + - name: httpjson-sentinel_one + type: httpjson + use_output: default + streams: + - condition: ${kubernetes.hints.sentinel_one.activity.enabled} == true or ${kubernetes.hints.sentinel_one.enabled} == true + config_version: 2 + cursor: + last_create_at: + value: '[[.last_event.createdAt]]' + data_stream: + dataset: sentinel_one.activity + type: logs + interval: 1m + request.method: GET + request.transforms: + - set: + target: header.Authorization + value: 'ApiToken ' + - 
set: + target: url.params.limit + value: "100" + - set: + target: url.params.sortBy + value: createdAt + - set: + target: url.params.sortOrder + value: asc + - set: + default: '[[formatDate (now (parseDuration "-24h"))]]' + target: url.params.createdAt__gte + value: '[[formatDate (parseDate .cursor.last_create_at)]]' + request.url: /web/api/v2.1/activities + response.pagination: + - set: + fail_on_template_error: true + target: url.params.cursor + value: '[[if (ne .last_response.body.pagination.nextCursor nil)]][[.last_response.body.pagination.nextCursor]][[else]][[.last_response.terminate_pagination]][[end]]' + response.split: + target: body.data + tags: + - forwarded + - sentinel_one-activity + - condition: ${kubernetes.hints.sentinel_one.agent.enabled} == true or ${kubernetes.hints.sentinel_one.enabled} == true + config_version: 2 + cursor: + last_update_at: + value: '[[.last_event.updatedAt]]' + data_stream: + dataset: sentinel_one.agent + type: logs + interval: 5m + request.method: GET + request.transforms: + - set: + target: header.Authorization + value: 'ApiToken ' + - set: + target: url.params.limit + value: "100" + - set: + target: url.params.sortBy + value: updatedAt + - set: + target: url.params.sortOrder + value: asc + - set: + default: '[[formatDate (now (parseDuration "-24h"))]]' + target: url.params.updatedAt__gte + value: '[[formatDate (parseDate .cursor.last_update_at)]]' + request.url: /web/api/v2.1/agents + response.pagination: + - set: + fail_on_template_error: true + target: url.params.cursor + value: '[[if (ne .last_response.body.pagination.nextCursor nil)]][[.last_response.body.pagination.nextCursor]][[else]][[.last_response.terminate_pagination]][[end]]' + response.split: + target: body.data + tags: + - forwarded + - sentinel_one-agent + - condition: ${kubernetes.hints.sentinel_one.alert.enabled} == true or ${kubernetes.hints.sentinel_one.enabled} == true + config_version: 2 + cursor: + last_create_at: + value: '[[.last_event.alertInfo.createdAt]]' + data_stream: + dataset: sentinel_one.alert + type: logs + interval: 5m + request.method: GET + request.transforms: + - set: + target: header.Authorization + value: 'ApiToken ' + - set: + target: url.params.limit + value: "100" + - set: + target: url.params.sortBy + value: alertInfoCreatedAt + - set: + target: url.params.sortOrder + value: asc + - set: + default: '[[formatDate (now (parseDuration "-24h"))]]' + target: url.params.createdAt__gte + value: '[[formatDate (parseDate .cursor.last_create_at)]]' + request.url: /web/api/v2.1/cloud-detection/alerts + response.pagination: + - set: + fail_on_template_error: true + target: url.params.cursor + value: '[[if (ne .last_response.body.pagination.nextCursor nil)]][[.last_response.body.pagination.nextCursor]][[else]][[.last_response.terminate_pagination]][[end]]' + response.split: + target: body.data + tags: + - forwarded + - sentinel_one-alert + - condition: ${kubernetes.hints.sentinel_one.group.enabled} == true or ${kubernetes.hints.sentinel_one.enabled} == true + config_version: 2 + cursor: + last_update_at: + value: '[[.last_event.updatedAt]]' + data_stream: + dataset: sentinel_one.group + type: logs + interval: 5m + request.method: GET + request.transforms: + - set: + target: header.Authorization + value: 'ApiToken ' + - set: + target: url.params.limit + value: "100" + - set: + target: url.params.sortBy + value: updatedAt + - set: + target: url.params.sortOrder + value: asc + - set: + default: '[[formatDate (now (parseDuration "-24h"))]]' + target: 
url.params.updatedAt__gte + value: '[[formatDate (parseDate .cursor.last_update_at)]]' + request.url: /web/api/v2.1/groups + response.pagination: + - set: + fail_on_template_error: true + target: url.params.cursor + value: '[[if (ne .last_response.body.pagination.nextCursor nil)]][[.last_response.body.pagination.nextCursor]][[else]][[.last_response.terminate_pagination]][[end]]' + response.split: + target: body.data + tags: + - forwarded + - sentinel_one-group + - condition: ${kubernetes.hints.sentinel_one.threat.enabled} == true or ${kubernetes.hints.sentinel_one.enabled} == true + config_version: 2 + cursor: + last_update_at: + value: '[[.last_event.threatInfo.updatedAt]]' + data_stream: + dataset: sentinel_one.threat + type: logs + interval: 5m + request.method: GET + request.transforms: + - set: + target: header.Authorization + value: 'ApiToken ' + - set: + target: url.params.limit + value: "100" + - set: + target: url.params.sortBy + value: updatedAt + - set: + target: url.params.sortOrder + value: asc + - set: + default: '[[formatDate (now (parseDuration "-24h"))]]' + target: url.params.updatedAt__gte + value: '[[formatDate (parseDate .cursor.last_update_at)]]' + request.url: /web/api/v2.1/threats + response.pagination: + - set: + fail_on_template_error: true + target: url.params.cursor + value: '[[if (ne .last_response.body.pagination.nextCursor nil)]][[.last_response.body.pagination.nextCursor]][[else]][[.last_response.terminate_pagination]][[end]]' + response.split: + target: body.data + tags: + - forwarded + - sentinel_one-threat + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/snort.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/snort.yml new file mode 100644 index 00000000000..80ed6df384a --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/snort.yml @@ -0,0 +1,53 @@ +inputs: + - name: filestream-snort + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.snort.log.enabled} == true or ${kubernetes.hints.snort.enabled} == true + data_stream: + dataset: snort.log + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.snort.log.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + processors: + - add_locale: null + - add_fields: + fields: + internal_networks: + - private + tz_offset: local + target: _tmp + prospector: + scanner: + symlinks: true + tags: + - forwarded + - snort.log + data_stream.namespace: default + - name: udp-snort + type: udp + use_output: default + streams: + - condition: ${kubernetes.hints.snort.log.enabled} == true or ${kubernetes.hints.snort.enabled} == true + data_stream: + dataset: snort.log + type: logs + host: localhost:9514 + processors: + - add_locale: null + - add_fields: + fields: + internal_networks: + - private + tz_offset: local + target: _tmp + tags: + - forwarded + - snort.log + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/snyk.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/snyk.yml new file mode 100644 index 00000000000..aef353751ec --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/snyk.yml @@ -0,0 +1,139 @@ +inputs: + - name: filestream-snyk + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.snyk.container_logs.enabled} == true + data_stream: + dataset: kubernetes.container_logs + type: logs + exclude_files: [] + 
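+      # Like every template in this directory, this trailing stream is a generic
+      # container-logs fallback: it is gated only on `snyk.container_logs.enabled`
+      # and tails the pod's log file through the `${kubernetes.hints.container_id}`
+      # variable. The kubelet names those files
+      # `<pod>_<namespace>_<container>-<container-id>.log`, which the
+      # `/var/log/containers/*<id>.log` glob below matches.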
exclude_lines: [] + parsers: + - container: + format: auto + stream: all + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + prospector: + scanner: + symlinks: true + tags: [] + data_stream.namespace: default + - name: httpjson-snyk + type: httpjson + use_output: default + streams: + - condition: ${kubernetes.hints.snyk.audit.enabled} == true or ${kubernetes.hints.snyk.enabled} == true + config_version: 2 + cursor: + interval: + value: -24h + data_stream: + dataset: snyk.audit + type: logs + interval: 10s + request.body: + filters: null + request.method: POST + request.transforms: + - set: + target: header.Authorization + value: token + - set: + target: url.params.to + value: '[[ formatDate (now) "2006-01-02" ]]' + - set: + default: '[[ formatDate (now (parseDuration "-720h")) "2006-01-02" ]]' + target: url.params.from + value: '[[ formatDate (now (parseDuration .cursor.interval)) "2006-01-02" ]]' + request.url: https://snyk.io/api/v1/org//audit?page=1&sortOrder=ASC + response.pagination: + - set: + fail_on_template_error: true + target: url.params.page + value: '[[if (ne (len .last_response.body.response) 0)]][[add .last_response.page 1]][[end]]' + response.request_body_on_pagination: true + tags: + - forwarded + - snyk-audit + - condition: ${kubernetes.hints.snyk.vulnerabilities.enabled} == true or ${kubernetes.hints.snyk.enabled} == true + config_version: 2 + cursor: + interval: + value: -24h + data_stream: + dataset: snyk.vulnerabilities + type: logs + interval: 24h + request.body: + filters: + exploitMaturity: + - mature + - proof-of-concept + - no-known-exploit + - no-data + fixable: false + identifier: null + ignored: false + isFixed: false + isPatchable: false + isPinnable: false + isUpgradable: false + languages: + - javascript + - ruby + - java + - scala + - python + - golang + - php + - dotnet + - swift-objective-c + - elixir + - docker + - terraform + - kubernetes + - helm + - cloudformation + orgs: null + patched: false + priorityScore: + max: 1000 + min: 0 + projects: null + severity: + - critical + - high + - medium + - low + types: + - vuln + - license + - configuration + request.method: POST + request.timeout: 120s + request.transforms: + - set: + target: header.Authorization + value: token + - set: + target: url.params.to + value: '[[ formatDate (now) "2006-01-02" ]]' + - set: + default: '[[ formatDate (now (parseDuration "-24h")) "2006-01-02" ]]' + target: url.params.from + value: '[[ formatDate (now (parseDuration .cursor.interval)) "2006-01-02" ]]' + request.url: https://snyk.io/api/v1/reporting/issues/?page=1&perPage=10&sortBy=issueTitle&order=asc&groupBy=issue + response.pagination: + - set: + fail_on_template_error: true + target: url.params.page + value: '[[if (ne (len .last_response.body.response) 0)]][[add .last_response.page 1]][[end]]' + response.request_body_on_pagination: true + response.split: + target: body.results + tags: + - forwarded + - snyk-vulnerabilities + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/stan.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/stan.yml new file mode 100644 index 00000000000..9fdee28a731 --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/stan.yml @@ -0,0 +1,56 @@ +inputs: + - name: filestream-stan + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.stan.log.enabled} == true or ${kubernetes.hints.stan.enabled} == true + data_stream: + dataset: stan.log + type: logs + exclude_files: + 
- .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.stan.log.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - stan-log + data_stream.namespace: default + - name: stan/metrics-stan + type: stan/metrics + use_output: default + streams: + - condition: ${kubernetes.hints.stan.channels.enabled} == true or ${kubernetes.hints.stan.enabled} == true + data_stream: + dataset: stan.channels + type: metrics + hosts: + - ${kubernetes.hints.stan.channels.host|'localhost:8222'} + metricsets: + - channels + period: ${kubernetes.hints.stan.channels.period|'60s'} + - condition: ${kubernetes.hints.stan.stats.enabled} == true or ${kubernetes.hints.stan.enabled} == true + data_stream: + dataset: stan.stats + type: metrics + hosts: + - ${kubernetes.hints.stan.stats.host|'localhost:8222'} + metricsets: + - stats + period: ${kubernetes.hints.stan.stats.period|'60s'} + - condition: ${kubernetes.hints.stan.subscriptions.enabled} == true or ${kubernetes.hints.stan.enabled} == true + data_stream: + dataset: stan.subscriptions + type: metrics + hosts: + - ${kubernetes.hints.stan.subscriptions.host|'localhost:8222'} + metricsets: + - subscriptions + period: ${kubernetes.hints.stan.subscriptions.period|'60s'} + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/suricata.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/suricata.yml new file mode 100644 index 00000000000..374d369783e --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/suricata.yml @@ -0,0 +1,24 @@ +inputs: + - name: filestream-suricata + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.suricata.eve.enabled} == true or ${kubernetes.hints.suricata.enabled} == true + data_stream: + dataset: suricata.eve + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.suricata.eve.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - suricata-eve + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/symantec_endpoint.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/symantec_endpoint.yml new file mode 100644 index 00000000000..fac3f6cbd93 --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/symantec_endpoint.yml @@ -0,0 +1,67 @@ +inputs: + - name: filestream-symantec_endpoint + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.symantec_endpoint.log.enabled} == true and ${kubernetes.hints.symantec_endpoint.enabled} == true + data_stream: + dataset: symantec_endpoint.log + type: logs + exclude_files: + - .gz$ + fields: + _conf: + remove_mapped_fields: false + tz_offset: UTC + fields_under_root: true + parsers: + - container: + format: auto + stream: ${kubernetes.hints.symantec_endpoint.log.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + prospector: + scanner: + symlinks: true + tags: + - symantec-endpoint-log + - forwarded + data_stream.namespace: default + - name: tcp-symantec_endpoint + type: tcp + use_output: default + streams: + - condition: ${kubernetes.hints.symantec_endpoint.log.enabled} == true and ${kubernetes.hints.symantec_endpoint.enabled} == true + data_stream: + dataset: symantec_endpoint.log + type: logs + fields: 
+ _conf: + remove_mapped_fields: false + tz_offset: UTC + fields_under_root: true + host: localhost:9008 + max_message_size: 1 MiB + tags: + - symantec-endpoint-log + - forwarded + data_stream.namespace: default + - name: udp-symantec_endpoint + type: udp + use_output: default + streams: + - condition: ${kubernetes.hints.symantec_endpoint.log.enabled} == true or ${kubernetes.hints.symantec_endpoint.enabled} == true + data_stream: + dataset: symantec_endpoint.log + type: logs + fields: + _conf: + remove_mapped_fields: false + tz_offset: UTC + fields_under_root: true + host: localhost:9008 + max_message_size: 1 MiB + tags: + - symantec-endpoint-log + - forwarded + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/synthetics.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/synthetics.yml new file mode 100644 index 00000000000..2f375b1a3f0 --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/synthetics.yml @@ -0,0 +1,148 @@ +inputs: + - name: synthetics/http-synthetics + type: synthetics/http + use_output: default + streams: + - __ui: null + check.request.method: null + condition: ${kubernetes.hints.synthetics.http.enabled} == true and ${kubernetes.hints.synthetics.enabled} == true + data_stream: + dataset: http + type: synthetics + enabled: true + max_redirects: null + name: null + processors: + - add_observer_metadata: + geo: + name: Fleet managed + - add_fields: + fields: + monitor.fleet_managed: true + target: "" + response.include_body: null + response.include_headers: null + schedule: '@every 3m' + timeout: null + type: http + urls: null + data_stream.namespace: default + - name: synthetics/tcp-synthetics + type: synthetics/tcp + use_output: default + streams: + - __ui: null + condition: ${kubernetes.hints.synthetics.tcp.enabled} == true and ${kubernetes.hints.synthetics.enabled} == true + data_stream: + dataset: tcp + type: synthetics + enabled: true + hosts: ${kubernetes.hints.synthetics.tcp.host|''} + name: null + processors: + - add_observer_metadata: + geo: + name: Fleet managed + - add_fields: + fields: + monitor.fleet_managed: true + target: "" + proxy_use_local_resolver: false + schedule: '@every 3m' + timeout: null + type: tcp + data_stream.namespace: default + - name: synthetics/icmp-synthetics + type: synthetics/icmp + use_output: default + streams: + - __ui: null + condition: ${kubernetes.hints.synthetics.icmp.enabled} == true and ${kubernetes.hints.synthetics.enabled} == true + data_stream: + dataset: icmp + type: synthetics + enabled: true + hosts: ${kubernetes.hints.synthetics.icmp.host|''} + name: null + processors: + - add_observer_metadata: + geo: + name: Fleet managed + - add_fields: + fields: + monitor.fleet_managed: true + target: "" + schedule: '@every 3m' + timeout: null + type: icmp + wait: 1s + data_stream.namespace: default + - name: synthetics/browser-synthetics + type: synthetics/browser + use_output: default + streams: + - __ui: null + condition: ${kubernetes.hints.synthetics.browser.enabled} == true or ${kubernetes.hints.synthetics.enabled} == true + data_stream: + dataset: browser + type: synthetics + enabled: true + name: null + processors: + - add_observer_metadata: + geo: + name: Fleet managed + - add_fields: + fields: + monitor.fleet_managed: true + target: "" + schedule: '@every 3m' + throttling: null + timeout: null + type: browser + - condition: ${kubernetes.hints.synthetics.browser_network.enabled} == true or ${kubernetes.hints.synthetics.enabled} == true + 
data_stream: + dataset: browser.network + type: synthetics + processors: + - add_observer_metadata: + geo: + name: Fleet managed + - add_fields: + fields: + monitor.fleet_managed: true + target: "" + - condition: ${kubernetes.hints.synthetics.browser_screenshot.enabled} == true or ${kubernetes.hints.synthetics.enabled} == true + data_stream: + dataset: browser.screenshot + type: synthetics + processors: + - add_observer_metadata: + geo: + name: Fleet managed + - add_fields: + fields: + monitor.fleet_managed: true + target: "" + data_stream.namespace: default + - name: filestream-synthetics + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.synthetics.container_logs.enabled} == true + data_stream: + dataset: kubernetes.container_logs + type: logs + exclude_files: [] + exclude_lines: [] + parsers: + - container: + format: auto + stream: all + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + prospector: + scanner: + symlinks: true + tags: [] + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/tcp.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/tcp.yml new file mode 100644 index 00000000000..34c8d0d984e --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/tcp.yml @@ -0,0 +1,32 @@ +inputs: + - name: filestream-tcp + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.tcp.container_logs.enabled} == true + data_stream: + dataset: kubernetes.container_logs + type: logs + exclude_files: [] + exclude_lines: [] + parsers: + - container: + format: auto + stream: all + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + prospector: + scanner: + symlinks: true + tags: [] + data_stream.namespace: default + - name: tcp-tcp + type: tcp + use_output: default + streams: + - condition: ${kubernetes.hints.tcp.generic.enabled} == true or ${kubernetes.hints.tcp.enabled} == true + data_stream: + dataset: tcp.generic + type: logs + host: localhost:8080 + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/tomcat.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/tomcat.yml new file mode 100644 index 00000000000..1355b57befa --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/tomcat.yml @@ -0,0 +1,8296 @@ +inputs: + - name: udp-tomcat + type: udp + use_output: default + streams: + - condition: ${kubernetes.hints.tomcat.log.enabled} == true or ${kubernetes.hints.tomcat.enabled} == true + data_stream: + dataset: tomcat.log + type: logs + fields: + observer: + product: TomCat + type: Web + vendor: Apache + fields_under_root: true + host: localhost:9523 + processors: + - script: + lang: javascript + params: + debug: false + ecs: true + keep_raw: false + rsa: true + tz_offset: local + source: | + // Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + // or more contributor license agreements. Licensed under the Elastic License; + // you may not use this file except in compliance with the Elastic License. 
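+
+    // Note on the embedded script's contract: the `script` processor that
+    // wraps this source calls register(params) once with the `params` block
+    // from the YAML above (debug/ecs/keep_raw/rsa/tz_offset) and then calls
+    // process(evt) for every event. A minimal sketch of that shape, using
+    // the same evt.Get/evt.Put accessors this file relies on (field names
+    // here are illustrative only):
+    //
+    //     function register(params) { /* capture options */ }
+    //     function process(evt) {
+    //         var msg = evt.Get("message");            // read an event field
+    //         if (msg) evt.Put("event.original", msg); // write an event field
+    //         return evt;
+    //     }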
+
+    /* jshint -W014,-W016,-W097,-W116 */
+
+    var processor = require("processor");
+    var console = require("console");
+
+    var FLAG_FIELD = "log.flags";
+    var FIELDS_OBJECT = "nwparser";
+    var FIELDS_PREFIX = FIELDS_OBJECT + ".";
+
+    var defaults = {
+        debug: false,
+        ecs: true,
+        rsa: false,
+        keep_raw: false,
+        tz_offset: "local",
+        strip_priority: true
+    };
+
+    var saved_flags = null;
+    var debug;
+    var map_ecs;
+    var map_rsa;
+    var keep_raw;
+    var device;
+    var tz_offset;
+    var strip_priority;
+
+    // Register params from configuration.
+    function register(params) {
+        debug = params.debug !== undefined ? params.debug : defaults.debug;
+        map_ecs = params.ecs !== undefined ? params.ecs : defaults.ecs;
+        map_rsa = params.rsa !== undefined ? params.rsa : defaults.rsa;
+        keep_raw = params.keep_raw !== undefined ? params.keep_raw : defaults.keep_raw;
+        tz_offset = parse_tz_offset(params.tz_offset !== undefined? params.tz_offset : defaults.tz_offset);
+        strip_priority = params.strip_priority !== undefined? params.strip_priority : defaults.strip_priority;
+        device = new DeviceProcessor();
+    }
+
+    function parse_tz_offset(offset) {
+        var date;
+        var m;
+        switch(offset) {
+            // local uses the tz offset from the JS VM.
+            case "local":
+                date = new Date();
+                // Reversing the sign, as we want the offset from UTC, not to UTC.
+                return parse_local_tz_offset(-date.getTimezoneOffset());
+            // event uses the tz offset from event.timezone (add_locale processor).
+            case "event":
+                return offset;
+            // Otherwise a tz offset in the form "[+-][0-9]{4}" is required.
+            default:
+                m = offset.match(/^([+\-])([0-9]{2}):?([0-9]{2})?$/);
+                if (m === null || m.length !== 4) {
+                    throw("bad timezone offset: '" + offset + "'. Must have the form +HH:MM");
+                }
+                return m[1] + m[2] + ":" + (m[3]!==undefined? m[3] : "00");
+        }
+    }
+
+    function parse_local_tz_offset(minutes) {
+        var neg = minutes < 0;
+        minutes = Math.abs(minutes);
+        var min = minutes % 60;
+        var hours = Math.floor(minutes / 60);
+        var pad2digit = function(n) {
+            if (n < 10) { return "0" + n;}
+            return "" + n;
+        };
+        return (neg? "-" : "+") + pad2digit(hours) + ":" + pad2digit(min);
+    }
+
+    function process(evt) {
+        // Function register is only called by the processor when `params` are set
+        // in the processor config.
+        if (device === undefined) {
+            register(defaults);
+        }
+        return device.process(evt);
+    }
+
+    function processor_chain(subprocessors) {
+        var builder = new processor.Chain();
+        subprocessors.forEach(builder.Add);
+        return builder.Build().Run;
+    }
+
+    function linear_select(subprocessors) {
+        return function (evt) {
+            var flags = evt.Get(FLAG_FIELD);
+            var i;
+            for (i = 0; i < subprocessors.length; i++) {
+                evt.Delete(FLAG_FIELD);
+                if (debug) console.warn("linear_select trying entry " + i);
+                subprocessors[i](evt);
+                // Dissect processor succeeded?
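+                // A dissect failure is signaled by the processor setting
+                // log.flags (FLAG_FIELD), so a null flag here means this
+                // entry's pattern matched and the remaining candidates are
+                // skipped: first match wins.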
+ if (evt.Get(FLAG_FIELD) == null) break; + if (debug) console.warn("linear_select failed entry " + i); + } + if (flags !== null) { + evt.Put(FLAG_FIELD, flags); + } + if (debug) { + if (i < subprocessors.length) { + console.warn("linear_select matched entry " + i); + } else { + console.warn("linear_select didn't match"); + } + } + }; + } + + function conditional(opt) { + return function(evt) { + if (opt.if(evt)) { + opt.then(evt); + } else if (opt.else) { + opt.else(evt); + } + }; + } + + var strip_syslog_priority = (function() { + var isEnabled = function() { return strip_priority === true; }; + var fetchPRI = field("_pri"); + var fetchPayload = field("payload"); + var removePayload = remove(["payload"]); + var cleanup = remove(["_pri", "payload"]); + var onMatch = function(evt) { + var pri, priStr = fetchPRI(evt); + if (priStr != null + && 0 < priStr.length && priStr.length < 4 + && !isNaN((pri = Number(priStr))) + && 0 <= pri && pri < 192) { + var severity = pri & 7, + facility = pri >> 3; + setc("_severity", "" + severity)(evt); + setc("_facility", "" + facility)(evt); + // Replace message with priority stripped. + evt.Put("message", fetchPayload(evt)); + removePayload(evt); + } else { + // not a valid syslog PRI, cleanup. + cleanup(evt); + } + }; + return conditional({ + if: isEnabled, + then: cleanup_flags(match( + "STRIP_PRI", + "message", + "<%{_pri}>%{payload}", + onMatch + )) + }); + })(); + + function match(id, src, pattern, on_success) { + var dissect = new processor.Dissect({ + field: src, + tokenizer: pattern, + target_prefix: FIELDS_OBJECT, + ignore_failure: true, + overwrite_keys: true, + trim_values: "right" + }); + return function (evt) { + var msg = evt.Get(src); + dissect.Run(evt); + var failed = evt.Get(FLAG_FIELD) != null; + if (debug) { + if (failed) { + console.debug("dissect fail: " + id + " field:" + src); + } else { + console.debug("dissect OK: " + id + " field:" + src); + } + console.debug(" expr: <<" + pattern + ">>"); + console.debug(" input: <<" + msg + ">>"); + } + if (on_success != null && !failed) { + on_success(evt); + } + }; + } + + function match_copy(id, src, dst, on_success) { + dst = FIELDS_PREFIX + dst; + if (dst === FIELDS_PREFIX || dst === src) { + return function (evt) { + if (debug) { + console.debug("noop OK: " + id + " field:" + src); + console.debug(" input: <<" + evt.Get(src) + ">>"); + } + if (on_success != null) on_success(evt); + } + } + return function (evt) { + var msg = evt.Get(src); + evt.Put(dst, msg); + if (debug) { + console.debug("copy OK: " + id + " field:" + src); + console.debug(" target: '" + dst + "'"); + console.debug(" input: <<" + msg + ">>"); + } + if (on_success != null) on_success(evt); + } + } + + function cleanup_flags(processor) { + return function(evt) { + processor(evt); + evt.Delete(FLAG_FIELD); + }; + } + + function all_match(opts) { + return function (evt) { + var i; + for (i = 0; i < opts.processors.length; i++) { + evt.Delete(FLAG_FIELD); + opts.processors[i](evt); + // Dissect processor succeeded? 
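+                // Unlike linear_select above, which stops at the first
+                // success, all_match aborts on the first sub-processor that
+                // leaves FLAG_FIELD set and runs opts.on_failure;
+                // opts.on_success only runs when every sub-processor matched.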
+ if (evt.Get(FLAG_FIELD) != null) { + if (debug) console.warn("all_match failure at " + i); + if (opts.on_failure != null) opts.on_failure(evt); + return; + } + if (debug) console.warn("all_match success at " + i); + } + if (opts.on_success != null) opts.on_success(evt); + }; + } + + function msgid_select(mapping) { + return function (evt) { + var msgid = evt.Get(FIELDS_PREFIX + "messageid"); + if (msgid == null) { + if (debug) console.warn("msgid_select: no messageid captured!"); + return; + } + var next = mapping[msgid]; + if (next === undefined) { + if (debug) console.warn("msgid_select: no mapping for messageid:" + msgid); + return; + } + if (debug) console.info("msgid_select: matched key=" + msgid); + return next(evt); + }; + } + + function msg(msg_id, match) { + return function (evt) { + match(evt); + if (evt.Get(FLAG_FIELD) == null) { + evt.Put(FIELDS_PREFIX + "msg_id1", msg_id); + } + }; + } + + var start; + + function save_flags(evt) { + saved_flags = evt.Get(FLAG_FIELD); + evt.Put("event.original", evt.Get("message")); + } + + function restore_flags(evt) { + if (saved_flags !== null) { + evt.Put(FLAG_FIELD, saved_flags); + } + evt.Delete("message"); + } + + function constant(value) { + return function (evt) { + return value; + }; + } + + function field(name) { + var fullname = FIELDS_PREFIX + name; + return function (evt) { + return evt.Get(fullname); + }; + } + + function STRCAT(args) { + var s = ""; + var i; + for (i = 0; i < args.length; i++) { + s += args[i]; + } + return s; + } + + // TODO: Implement + function DIRCHK(args) { + unimplemented("DIRCHK"); + } + + function strictToInt(str) { + return str * 1; + } + + function CALC(args) { + if (args.length !== 3) { + console.warn("skipped call to CALC with " + args.length + " arguments."); + return; + } + var a = strictToInt(args[0]); + var b = strictToInt(args[2]); + if (isNaN(a) || isNaN(b)) { + console.warn("failed evaluating CALC arguments a='" + args[0] + "' b='" + args[2] + "'."); + return; + } + var result; + switch (args[1]) { + case "+": + result = a + b; + break; + case "-": + result = a - b; + break; + case "*": + result = a * b; + break; + default: + // Only * and + seen in the parsers. + console.warn("unknown CALC operation '" + args[1] + "'."); + return; + } + // Always return a string + return result !== undefined ? "" + result : result; + } + + var quoteChars = "\"'`"; + function RMQ(args) { + if(args.length !== 1) { + console.warn("RMQ: only one argument expected"); + return; + } + var value = args[0].trim(); + var n = value.length; + var char; + return n > 1 + && (char=value.charAt(0)) === value.charAt(n-1) + && quoteChars.indexOf(char) !== -1? 
+ value.substr(1, n-2) + : value; + } + + function call(opts) { + var args = new Array(opts.args.length); + return function (evt) { + for (var i = 0; i < opts.args.length; i++) + if ((args[i] = opts.args[i](evt)) == null) return; + var result = opts.fn(args); + if (result != null) { + evt.Put(opts.dest, result); + } + }; + } + + function nop(evt) { + } + + function appendErrorMsg(evt, msg) { + var value = evt.Get("error.message"); + if (value == null) { + value = [msg]; + } else if (msg instanceof Array) { + value.push(msg); + } else { + value = [value, msg]; + } + evt.Put("error.message", value); + } + + function unimplemented(name) { + appendErrorMsg("unimplemented feature: " + name); + } + + function lookup(opts) { + return function (evt) { + var key = opts.key(evt); + if (key == null) return; + var value = opts.map.keyvaluepairs[key]; + if (value === undefined) { + value = opts.map.default; + } + if (value !== undefined) { + evt.Put(opts.dest, value(evt)); + } + }; + } + + function set(fields) { + return new processor.AddFields({ + target: FIELDS_OBJECT, + fields: fields, + }); + } + + function setf(dst, src) { + return function (evt) { + var val = evt.Get(FIELDS_PREFIX + src); + if (val != null) evt.Put(FIELDS_PREFIX + dst, val); + }; + } + + function setc(dst, value) { + return function (evt) { + evt.Put(FIELDS_PREFIX + dst, value); + }; + } + + function set_field(opts) { + return function (evt) { + var val = opts.value(evt); + if (val != null) evt.Put(opts.dest, val); + }; + } + + function dump(label) { + return function (evt) { + console.log("Dump of event at " + label + ": " + JSON.stringify(evt, null, "\t")); + }; + } + + function date_time_join_args(evt, arglist) { + var str = ""; + for (var i = 0; i < arglist.length; i++) { + var fname = FIELDS_PREFIX + arglist[i]; + var val = evt.Get(fname); + if (val != null) { + if (str !== "") str += " "; + str += val; + } else { + if (debug) console.warn("in date_time: input arg " + fname + " is not set"); + } + } + return str; + } + + function to2Digit(num) { + return num? (num < 10? "0" + num : num) : "00"; + } + + // Make two-digit dates 00-69 interpreted as 2000-2069 + // and dates 70-99 translated to 1970-1999. + var twoDigitYearEpoch = 70; + var twoDigitYearCentury = 2000; + + // This is to accept dates up to 2 days in the future, only used when + // no year is specified in a date. 2 days should be enough to account for + // time differences between systems and different tz offsets. + var maxFutureDelta = 2*24*60*60*1000; + + // DateContainer stores date fields and then converts those fields into + // a Date. Necessary because building a Date using its set() methods gives + // different results depending on the order of components. + function DateContainer(tzOffset) { + this.offset = tzOffset === undefined? "Z" : tzOffset; + } + + DateContainer.prototype = { + setYear: function(v) {this.year = v;}, + setMonth: function(v) {this.month = v;}, + setDay: function(v) {this.day = v;}, + setHours: function(v) {this.hours = v;}, + setMinutes: function(v) {this.minutes = v;}, + setSeconds: function(v) {this.seconds = v;}, + + setUNIX: function(v) {this.unix = v;}, + + set2DigitYear: function(v) { + this.year = v < twoDigitYearEpoch? twoDigitYearCentury + v : twoDigitYearCentury + v - 100; + }, + + toDate: function() { + if (this.unix !== undefined) { + return new Date(this.unix * 1000); + } + if (this.day === undefined || this.month === undefined) { + // Can't make a date from this. 
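+            // Day and month are the required minimum: the year can be
+            // inferred below, and missing time-of-day fields default to "00"
+            // via to2Digit. Two-digit years resolve through set2DigitYear,
+            // e.g. "69" becomes 2069 but "70" becomes 1970.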
+            return undefined;
+        }
+        if (this.year === undefined) {
+            // A date without a year. Set current year, or previous year
+            // if date would be in the future.
+            var now = new Date();
+            this.year = now.getFullYear();
+            var date = this.toDate();
+            if (date.getTime() - now.getTime() > maxFutureDelta) {
+                date.setFullYear(now.getFullYear() - 1);
+            }
+            return date;
+        }
+        var MM = to2Digit(this.month);
+        var DD = to2Digit(this.day);
+        var hh = to2Digit(this.hours);
+        var mm = to2Digit(this.minutes);
+        var ss = to2Digit(this.seconds);
+        return new Date(this.year + "-" + MM + "-" + DD + "T" + hh + ":" + mm + ":" + ss + this.offset);
+        }
+    }
+
+    function date_time_try_pattern(fmt, str, tzOffset) {
+        var date = new DateContainer(tzOffset);
+        var pos = date_time_try_pattern_at_pos(fmt, str, 0, date);
+        return pos !== undefined? date.toDate() : undefined;
+    }
+
+    function date_time_try_pattern_at_pos(fmt, str, pos, date) {
+        var len = str.length;
+        for (var proc = 0; pos !== undefined && pos < len && proc < fmt.length; proc++) {
+            pos = fmt[proc](str, pos, date);
+        }
+        return pos;
+    }
+
+    function date_time(opts) {
+        return function (evt) {
+            var tzOffset = opts.tz || tz_offset;
+            if (tzOffset === "event") {
+                tzOffset = evt.Get("event.timezone");
+            }
+            var str = date_time_join_args(evt, opts.args);
+            for (var i = 0; i < opts.fmts.length; i++) {
+                var date = date_time_try_pattern(opts.fmts[i], str, tzOffset);
+                if (date !== undefined) {
+                    evt.Put(FIELDS_PREFIX + opts.dest, date);
+                    return;
+                }
+            }
+            if (debug) console.warn("in date_time: id=" + opts.id + " FAILED: " + str);
+        };
+    }
+
+    var uA = 60 * 60 * 24;
+    var uD = 60 * 60 * 24;
+    var uF = 60 * 60;
+    var uG = 60 * 60 * 24 * 30;
+    var uH = 60 * 60;
+    var uI = 60 * 60;
+    var uJ = 60 * 60 * 24;
+    var uM = 60 * 60 * 24 * 30;
+    var uN = 60 * 60;
+    var uO = 1;
+    var uS = 1;
+    var uT = 60;
+    var uU = 60;
+    var uc = dc;
+
+    function duration(opts) {
+        return function(evt) {
+            var str = date_time_join_args(evt, opts.args);
+            for (var i = 0; i < opts.fmts.length; i++) {
+                var seconds = duration_try_pattern(opts.fmts[i], str);
+                if (seconds !== undefined) {
+                    evt.Put(FIELDS_PREFIX + opts.dest, seconds);
+                    return;
+                }
+            }
+            if (debug) console.warn("in duration: id=" + opts.id + " (s) FAILED: " + str);
+        };
+    }
+
+    function duration_try_pattern(fmt, str) {
+        var secs = 0;
+        var pos = 0;
+        for (var i=0; i<fmt.length; i++) {
+            // (The body of this loop did not survive extraction; the following
+            // is an assumed reconstruction: numeric fmt entries are the u*
+            // unit multipliers above, applied to the next integer in the
+            // input, and any other entry is a literal matcher such as uc(":").)
+            var f = fmt[i];
+            if (typeof f === "number") {
+                var start = skipws(str, pos);
+                pos = skipdigits(str, start);
+                if (pos === start) return;
+                secs += parseInt(str.substr(start, pos - start), 10) * f;
+            } else {
+                pos = f(str, pos);
+                if (pos === undefined) return;
+            }
+        }
+        return secs;
+    }
+
+    var shortMonths = {
+        // mon => [ month_id , how many chars to skip if month in long form ]
+        "Jan": [0, 4],
+        "Feb": [1, 5],
+        "Mar": [2, 2],
+        "Apr": [3, 2],
+        "May": [4, 0],
+        "Jun": [5, 1],
+        "Jul": [6, 1],
+        "Aug": [7, 3],
+        "Sep": [8, 6],
+        "Oct": [9, 4],
+        "Nov": [10, 5],
+        "Dec": [11, 4],
+        "jan": [0, 4],
+        "feb": [1, 5],
+        "mar": [2, 2],
+        "apr": [3, 2],
+        "may": [4, 0],
+        "jun": [5, 1],
+        "jul": [6, 1],
+        "aug": [7, 3],
+        "sep": [8, 6],
+        "oct": [9, 4],
+        "nov": [10, 5],
+        "dec": [11, 4],
+    };
+
+    // var dC = undefined;
+    var dR = dateMonthName(true);
+    var dB = dateMonthName(false);
+    var dM = dateFixedWidthNumber("M", 2, 1, 12, DateContainer.prototype.setMonth);
+    var dG = dateVariableWidthNumber("G", 1, 12, DateContainer.prototype.setMonth);
+    var dD = dateFixedWidthNumber("D", 2, 1, 31, DateContainer.prototype.setDay);
+    var dF = dateVariableWidthNumber("F", 1, 31, DateContainer.prototype.setDay);
+    var dH = dateFixedWidthNumber("H", 2, 0, 24, DateContainer.prototype.setHours);
+    var dI = dateVariableWidthNumber("I", 0, 24, DateContainer.prototype.setHours); // Accept hours >12
+    var dN = dateVariableWidthNumber("N", 0, 24, DateContainer.prototype.setHours);
+    var dT =
dateFixedWidthNumber("T", 2, 0, 59, DateContainer.prototype.setMinutes); + var dU = dateVariableWidthNumber("U", 0, 59, DateContainer.prototype.setMinutes); + var dP = parseAMPM; // AM|PM + var dQ = parseAMPM; // A.M.|P.M + var dS = dateFixedWidthNumber("S", 2, 0, 60, DateContainer.prototype.setSeconds); + var dO = dateVariableWidthNumber("O", 0, 60, DateContainer.prototype.setSeconds); + var dY = dateFixedWidthNumber("Y", 2, 0, 99, DateContainer.prototype.set2DigitYear); + var dW = dateFixedWidthNumber("W", 4, 1000, 9999, DateContainer.prototype.setYear); + var dZ = parseHMS; + var dX = dateVariableWidthNumber("X", 0, 0x10000000000, DateContainer.prototype.setUNIX); + + // parseAMPM parses "A.M", "AM", "P.M", "PM" from logs. + // Only works if this modifier appears after the hour has been read from logs + // which is always the case in the 300 devices. + function parseAMPM(str, pos, date) { + var n = str.length; + var start = skipws(str, pos); + if (start + 2 > n) return; + var head = str.substr(start, 2).toUpperCase(); + var isPM = false; + var skip = false; + switch (head) { + case "A.": + skip = true; + /* falls through */ + case "AM": + break; + case "P.": + skip = true; + /* falls through */ + case "PM": + isPM = true; + break; + default: + if (debug) console.warn("can't parse pos " + start + " as AM/PM: " + str + "(head:" + head + ")"); + return; + } + pos = start + 2; + if (skip) { + if (pos+2 > n || str.substr(pos, 2).toUpperCase() !== "M.") { + if (debug) console.warn("can't parse pos " + start + " as AM/PM: " + str + "(tail)"); + return; + } + pos += 2; + } + var hh = date.hours; + if (isPM) { + // Accept existing hour in 24h format. + if (hh < 12) hh += 12; + } else { + if (hh === 12) hh = 0; + } + date.setHours(hh); + return pos; + } + + function parseHMS(str, pos, date) { + return date_time_try_pattern_at_pos([dN, dc(":"), dU, dc(":"), dO], str, pos, date); + } + + function skipws(str, pos) { + for ( var n = str.length; + pos < n && str.charAt(pos) === " "; + pos++) + ; + return pos; + } + + function skipdigits(str, pos) { + var c; + for (var n = str.length; + pos < n && (c = str.charAt(pos)) >= "0" && c <= "9"; + pos++) + ; + return pos; + } + + function dSkip(str, pos, date) { + var chr; + for (;pos < str.length && (chr=str[pos])<'0' || chr>'9'; pos++) {} + return pos < str.length? pos : undefined; + } + + function dateVariableWidthNumber(fmtChar, min, max, setter) { + return function (str, pos, date) { + var start = skipws(str, pos); + pos = skipdigits(str, start); + var s = str.substr(start, pos - start); + var value = parseInt(s, 10); + if (value >= min && value <= max) { + setter.call(date, value); + return pos; + } + return; + }; + } + + function dateFixedWidthNumber(fmtChar, width, min, max, setter) { + return function (str, pos, date) { + pos = skipws(str, pos); + var n = str.length; + if (pos + width > n) return; + var s = str.substr(pos, width); + var value = parseInt(s, 10); + if (value >= min && value <= max) { + setter.call(date, value); + return pos + width; + } + return; + }; + } + + // Short month name (Jan..Dec). 
+ function dateMonthName(long) { + return function (str, pos, date) { + pos = skipws(str, pos); + var n = str.length; + if (pos + 3 > n) return; + var mon = str.substr(pos, 3); + var idx = shortMonths[mon]; + if (idx === undefined) { + idx = shortMonths[mon.toLowerCase()]; + } + if (idx === undefined) { + //console.warn("parsing date_time: '" + mon + "' is not a valid short month (%B)"); + return; + } + date.setMonth(idx[0]+1); + return pos + 3 + (long ? idx[1] : 0); + }; + } + + function url_wrapper(dst, src, fn) { + return function(evt) { + var value = evt.Get(FIELDS_PREFIX + src), result; + if (value != null && (result = fn(value))!== undefined) { + evt.Put(FIELDS_PREFIX + dst, result); + } else { + console.debug(fn.name + " failed for '" + value + "'"); + } + }; + } + + // The following regular expression for parsing URLs from: + // https://github.com/wizard04wsu/URI_Parsing + // + // The MIT License (MIT) + // + // Copyright (c) 2014 Andrew Harrison + // + // Permission is hereby granted, free of charge, to any person obtaining a copy of + // this software and associated documentation files (the "Software"), to deal in + // the Software without restriction, including without limitation the rights to + // use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + // the Software, and to permit persons to whom the Software is furnished to do so, + // subject to the following conditions: + // + // The above copyright notice and this permission notice shall be included in all + // copies or substantial portions of the Software. + // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + // FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + // COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + // IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + // CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + var uriRegExp = /^([a-z][a-z0-9+.\-]*):(?:\/\/((?:(?=((?:[a-z0-9\-._~!$&'()*+,;=:]|%[0-9A-F]{2})*))(\3)@)?(?=(\[[0-9A-F:.]{2,}\]|(?:[a-z0-9\-._~!$&'()*+,;=]|%[0-9A-F]{2})*))\5(?::(?=(\d*))\6)?)(\/(?=((?:[a-z0-9-._~!$&'()*+,;=:@\/]|%[0-9A-F]{2})*))\8)?|(\/?(?!\/)(?=((?:[a-z0-9-._~!$&'()*+,;=:@\/]|%[0-9A-F]{2})*))\10)?)(?:\?(?=((?:[a-z0-9-._~!$&'()*+,;=:@\/?]|%[0-9A-F]{2})*))\11)?(?:#(?=((?:[a-z0-9-._~!$&'()*+,;=:@\/?]|%[0-9A-F]{2})*))\12)?$/i; + + var uriScheme = 1; + var uriDomain = 5; + var uriPort = 6; + var uriPath = 7; + var uriPathAlt = 9; + var uriQuery = 11; + + function domain(dst, src) { + return url_wrapper(dst, src, extract_domain); + } + + function split_url(value) { + var m = value.match(uriRegExp); + if (m && m[uriDomain]) return m; + // Support input in the form "www.example.net/path", but not "/path". + m = ("null://" + value).match(uriRegExp); + if (m) return m; + } + + function extract_domain(value) { + var m = split_url(value); + if (m && m[uriDomain]) return m[uriDomain]; + } + + var extFromPage = /\.[^.]+$/; + function extract_ext(value) { + var page = extract_page(value); + if (page) { + var m = page.match(extFromPage); + if (m) return m[0]; + } + } + + function ext(dst, src) { + return url_wrapper(dst, src, extract_ext); + } + + function fqdn(dst, src) { + // TODO: fqdn and domain(eTLD+1) are currently the same. 
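+        // (For reference: split_url() above relies on the uriRegExp capture
+        // groups scheme=1, domain=5, port=6, path=7/9 and query=11; as a
+        // sketch, split_url("https://example.net:8443/a/b?x=1") yields domain
+        // "example.net", port "8443", path "/a/b" and query "x=1".)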
+ return domain(dst, src); + } + + var pageFromPathRegExp = /\/([^\/]+)$/; + var pageName = 1; + + function extract_page(value) { + value = extract_path(value); + if (!value) return undefined; + var m = value.match(pageFromPathRegExp); + if (m) return m[pageName]; + } + + function page(dst, src) { + return url_wrapper(dst, src, extract_page); + } + + function extract_path(value) { + var m = split_url(value); + return m? m[uriPath] || m[uriPathAlt] : undefined; + } + + function path(dst, src) { + return url_wrapper(dst, src, extract_path); + } + + // Map common schemes to their default port. + // port has to be a string (will be converted at a later stage). + var schemePort = { + "ftp": "21", + "ssh": "22", + "http": "80", + "https": "443", + }; + + function extract_port(value) { + var m = split_url(value); + if (!m) return undefined; + if (m[uriPort]) return m[uriPort]; + if (m[uriScheme]) { + return schemePort[m[uriScheme]]; + } + } + + function port(dst, src) { + return url_wrapper(dst, src, extract_port); + } + + function extract_query(value) { + var m = split_url(value); + if (m && m[uriQuery]) return m[uriQuery]; + } + + function query(dst, src) { + return url_wrapper(dst, src, extract_query); + } + + function extract_root(value) { + var m = split_url(value); + if (m && m[uriDomain] && m[uriDomain]) { + var scheme = m[uriScheme] && m[uriScheme] !== "null"? + m[uriScheme] + "://" : ""; + var port = m[uriPort]? ":" + m[uriPort] : ""; + return scheme + m[uriDomain] + port; + } + } + + function root(dst, src) { + return url_wrapper(dst, src, extract_root); + } + + function tagval(id, src, cfg, keys, on_success) { + var fail = function(evt) { + evt.Put(FLAG_FIELD, "tagval_parsing_error"); + } + if (cfg.kv_separator.length !== 1) { + throw("Invalid TAGVALMAP ValueDelimiter (must have 1 character)"); + } + var quotes_len = cfg.open_quote.length > 0 && cfg.close_quote.length > 0? 
+            cfg.open_quote.length + cfg.close_quote.length : 0;
+        var kv_regex = new RegExp('^([^' + cfg.kv_separator + ']*)*' + cfg.kv_separator + ' *(.*)*$');
+        return function(evt) {
+            var msg = evt.Get(src);
+            if (msg === undefined) {
+                console.warn("tagval: input field is missing");
+                return fail(evt);
+            }
+            var pairs = msg.split(cfg.pair_separator);
+            var i;
+            var success = false;
+            var prev = "";
+            for (i=0; i<pairs.length; i++) {
+                // (This loop header and the key/value extraction below did not
+                // survive extraction; this is an assumed reconstruction: each
+                // pair is split on the first kv_separator, keys are mapped
+                // through `keys`, and unparseable pairs are folded into the
+                // previous value via `prev`.)
+                var m = pairs[i].match(kv_regex);
+                if (m === null || m.length !== 3 || keys[m[1]] === undefined) {
+                    prev += cfg.pair_separator + pairs[i];
+                    continue;
+                }
+                prev = "";
+                var field = keys[m[1]];
+                var value = m[2];
+                if (quotes_len > 0 &&
+                    value.length >= cfg.open_quote.length + cfg.close_quote.length &&
+                    value.substr(0, cfg.open_quote.length) === cfg.open_quote &&
+                    value.substr(value.length - cfg.close_quote.length) === cfg.close_quote) {
+                    value = value.substr(cfg.open_quote.length, value.length - quotes_len);
+                }
+                evt.Put(FIELDS_PREFIX + field, value);
+                success = true;
+            }
+            if (!success) {
+                return fail(evt);
+            }
+            if (on_success != null) {
+                on_success(evt);
+            }
+        }
+    }
+
+    var ecs_mappings = {
+        "_facility": {convert: to_long, to:[{field: "log.syslog.facility.code", setter: fld_set}]},
+        "_pri": {convert: to_long, to:[{field: "log.syslog.priority", setter: fld_set}]},
+        "_severity": {convert: to_long, to:[{field: "log.syslog.severity.code", setter: fld_set}]},
+        "action": {to:[{field: "event.action", setter: fld_prio, prio: 0}]},
+        "administrator": {to:[{field: "related.user", setter: fld_append},{field: "user.name", setter: fld_prio, prio: 4}]},
+        "alias.ip": {convert: to_ip, to:[{field: "host.ip", setter: fld_prio, prio: 3},{field: "related.ip", setter: fld_append}]},
+        "alias.ipv6": {convert: to_ip, to:[{field: "host.ip", setter: fld_prio, prio: 4},{field: "related.ip", setter: fld_append}]},
+        "alias.mac": {convert: to_mac, to:[{field: "host.mac", setter: fld_prio, prio: 1}]},
+        "application": {to:[{field: "network.application", setter: fld_set}]},
+        "bytes": {convert: to_long, to:[{field: "network.bytes", setter: fld_set}]},
+        "c_domain": {to:[{field: "source.domain", setter: fld_prio, prio: 1}]},
+        "c_logon_id": {to:[{field: "user.id", setter: fld_prio, prio: 2}]},
+        "c_user_name": {to:[{field: "related.user", setter: fld_append},{field: "user.name", setter: fld_prio, prio: 8}]},
+        "c_username": {to:[{field: "related.user", setter: fld_append},{field: "user.name", setter: fld_prio, prio: 2}]},
+        "cctld": {to:[{field: "url.top_level_domain", setter: fld_prio, prio: 1}]},
+        "child_pid": {convert: to_long, to:[{field: "process.pid", setter: fld_prio, prio: 1}]},
+        "child_pid_val": {to:[{field: "process.title", setter: fld_set}]},
+        "child_process": {to:[{field: "process.name", setter: fld_prio, prio: 1}]},
+        "city.dst": {to:[{field: "destination.geo.city_name", setter: fld_set}]},
+        "city.src": {to:[{field: "source.geo.city_name", setter: fld_set}]},
+        "daddr": {convert: to_ip, to:[{field: "destination.ip", setter: fld_set},{field: "related.ip", setter: fld_append}]},
+        "daddr_v6": {convert: to_ip, to:[{field: "destination.ip", setter: fld_set},{field: "related.ip", setter: fld_append}]},
+        "ddomain": {to:[{field: "destination.domain", setter: fld_prio, prio: 0}]},
+        "devicehostip": {convert: to_ip, to:[{field: "host.ip", setter: fld_prio, prio: 2},{field: "related.ip", setter: fld_append}]},
+        "devicehostmac": {convert: to_mac, to:[{field: "host.mac", setter: fld_prio, prio: 0}]},
+        "dhost": {to:[{field: "destination.address", setter: fld_set},{field: "related.hosts", setter: fld_append}]},
+        "dinterface": {to:[{field: "observer.egress.interface.name", setter: fld_set}]},
+        "direction": {to:[{field: "network.direction", setter: fld_set}]},
+        "directory": {to:[{field: "file.directory", setter:
fld_set}]}, + "dmacaddr": {convert: to_mac, to:[{field: "destination.mac", setter: fld_set}]}, + "dns.responsetype": {to:[{field: "dns.answers.type", setter: fld_set}]}, + "dns.resptext": {to:[{field: "dns.answers.name", setter: fld_set}]}, + "dns_querytype": {to:[{field: "dns.question.type", setter: fld_set}]}, + "domain": {to:[{field: "server.domain", setter: fld_prio, prio: 0},{field: "related.hosts", setter: fld_append}]}, + "domain.dst": {to:[{field: "destination.domain", setter: fld_prio, prio: 1}]}, + "domain.src": {to:[{field: "source.domain", setter: fld_prio, prio: 2}]}, + "domain_id": {to:[{field: "user.domain", setter: fld_set}]}, + "domainname": {to:[{field: "server.domain", setter: fld_prio, prio: 1}]}, + "dport": {convert: to_long, to:[{field: "destination.port", setter: fld_prio, prio: 0}]}, + "dtransaddr": {convert: to_ip, to:[{field: "destination.nat.ip", setter: fld_prio, prio: 0},{field: "related.ip", setter: fld_append}]}, + "dtransport": {convert: to_long, to:[{field: "destination.nat.port", setter: fld_prio, prio: 0}]}, + "ec_outcome": {to:[{field: "event.outcome", setter: fld_ecs_outcome}]}, + "event_description": {to:[{field: "message", setter: fld_prio, prio: 0}]}, + "event_source": {to:[{field: "related.hosts", setter: fld_append}]}, + "event_time": {convert: to_date, to:[{field: "@timestamp", setter: fld_set}]}, + "event_type": {to:[{field: "event.action", setter: fld_prio, prio: 1}]}, + "extension": {to:[{field: "file.extension", setter: fld_prio, prio: 1}]}, + "file.attributes": {to:[{field: "file.attributes", setter: fld_set}]}, + "filename": {to:[{field: "file.name", setter: fld_prio, prio: 0}]}, + "filename_size": {convert: to_long, to:[{field: "file.size", setter: fld_set}]}, + "filepath": {to:[{field: "file.path", setter: fld_set}]}, + "filetype": {to:[{field: "file.type", setter: fld_set}]}, + "fqdn": {to:[{field: "related.hosts", setter: fld_append}]}, + "group": {to:[{field: "group.name", setter: fld_set}]}, + "groupid": {to:[{field: "group.id", setter: fld_set}]}, + "host": {to:[{field: "host.name", setter: fld_prio, prio: 1},{field: "related.hosts", setter: fld_append}]}, + "hostip": {convert: to_ip, to:[{field: "host.ip", setter: fld_prio, prio: 0},{field: "related.ip", setter: fld_append}]}, + "hostip_v6": {convert: to_ip, to:[{field: "host.ip", setter: fld_prio, prio: 1},{field: "related.ip", setter: fld_append}]}, + "hostname": {to:[{field: "host.name", setter: fld_prio, prio: 0}]}, + "id": {to:[{field: "event.code", setter: fld_prio, prio: 0}]}, + "interface": {to:[{field: "network.interface.name", setter: fld_set}]}, + "ip.orig": {convert: to_ip, to:[{field: "network.forwarded_ip", setter: fld_prio, prio: 0},{field: "related.ip", setter: fld_append}]}, + "ip.trans.dst": {convert: to_ip, to:[{field: "destination.nat.ip", setter: fld_prio, prio: 1},{field: "related.ip", setter: fld_append}]}, + "ip.trans.src": {convert: to_ip, to:[{field: "source.nat.ip", setter: fld_prio, prio: 1},{field: "related.ip", setter: fld_append}]}, + "ipv6.orig": {convert: to_ip, to:[{field: "network.forwarded_ip", setter: fld_prio, prio: 2},{field: "related.ip", setter: fld_append}]}, + "latdec_dst": {convert: to_double, to:[{field: "destination.geo.location.lat", setter: fld_set}]}, + "latdec_src": {convert: to_double, to:[{field: "source.geo.location.lat", setter: fld_set}]}, + "location_city": {to:[{field: "geo.city_name", setter: fld_set}]}, + "location_country": {to:[{field: "geo.country_name", setter: fld_set}]}, + "location_desc": {to:[{field: "geo.name", 
setter: fld_set}]}, + "location_dst": {to:[{field: "destination.geo.country_name", setter: fld_set}]}, + "location_src": {to:[{field: "source.geo.country_name", setter: fld_set}]}, + "location_state": {to:[{field: "geo.region_name", setter: fld_set}]}, + "logon_id": {to:[{field: "related.user", setter: fld_append},{field: "user.name", setter: fld_prio, prio: 5}]}, + "longdec_dst": {convert: to_double, to:[{field: "destination.geo.location.lon", setter: fld_set}]}, + "longdec_src": {convert: to_double, to:[{field: "source.geo.location.lon", setter: fld_set}]}, + "macaddr": {convert: to_mac, to:[{field: "host.mac", setter: fld_prio, prio: 2}]}, + "messageid": {to:[{field: "event.code", setter: fld_prio, prio: 1}]}, + "method": {to:[{field: "http.request.method", setter: fld_set}]}, + "msg": {to:[{field: "message", setter: fld_set}]}, + "orig_ip": {convert: to_ip, to:[{field: "network.forwarded_ip", setter: fld_prio, prio: 1},{field: "related.ip", setter: fld_append}]}, + "owner": {to:[{field: "related.user", setter: fld_append},{field: "user.name", setter: fld_prio, prio: 6}]}, + "packets": {convert: to_long, to:[{field: "network.packets", setter: fld_set}]}, + "parent_pid": {convert: to_long, to:[{field: "process.parent.pid", setter: fld_prio, prio: 0}]}, + "parent_pid_val": {to:[{field: "process.parent.title", setter: fld_set}]}, + "parent_process": {to:[{field: "process.parent.name", setter: fld_prio, prio: 0}]}, + "patient_fullname": {to:[{field: "user.full_name", setter: fld_prio, prio: 1}]}, + "port.dst": {convert: to_long, to:[{field: "destination.port", setter: fld_prio, prio: 1}]}, + "port.src": {convert: to_long, to:[{field: "source.port", setter: fld_prio, prio: 1}]}, + "port.trans.dst": {convert: to_long, to:[{field: "destination.nat.port", setter: fld_prio, prio: 1}]}, + "port.trans.src": {convert: to_long, to:[{field: "source.nat.port", setter: fld_prio, prio: 1}]}, + "process": {to:[{field: "process.name", setter: fld_prio, prio: 0}]}, + "process_id": {convert: to_long, to:[{field: "process.pid", setter: fld_prio, prio: 0}]}, + "process_id_src": {convert: to_long, to:[{field: "process.parent.pid", setter: fld_prio, prio: 1}]}, + "process_src": {to:[{field: "process.parent.name", setter: fld_prio, prio: 1}]}, + "product": {to:[{field: "observer.product", setter: fld_set}]}, + "protocol": {to:[{field: "network.protocol", setter: fld_set}]}, + "query": {to:[{field: "url.query", setter: fld_prio, prio: 2}]}, + "rbytes": {convert: to_long, to:[{field: "destination.bytes", setter: fld_set}]}, + "referer": {to:[{field: "http.request.referrer", setter: fld_prio, prio: 1}]}, + "rulename": {to:[{field: "rule.name", setter: fld_set}]}, + "saddr": {convert: to_ip, to:[{field: "source.ip", setter: fld_set},{field: "related.ip", setter: fld_append}]}, + "saddr_v6": {convert: to_ip, to:[{field: "source.ip", setter: fld_set},{field: "related.ip", setter: fld_append}]}, + "sbytes": {convert: to_long, to:[{field: "source.bytes", setter: fld_set}]}, + "sdomain": {to:[{field: "source.domain", setter: fld_prio, prio: 0}]}, + "service": {to:[{field: "service.name", setter: fld_prio, prio: 1}]}, + "service.name": {to:[{field: "service.name", setter: fld_prio, prio: 0}]}, + "service_account": {to:[{field: "related.user", setter: fld_append},{field: "user.name", setter: fld_prio, prio: 7}]}, + "severity": {to:[{field: "log.level", setter: fld_set}]}, + "shost": {to:[{field: "host.hostname", setter: fld_set},{field: "source.address", setter: fld_set},{field: "related.hosts", setter: fld_append}]}, + 
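// Illustrative aside (not part of the generated mapping table): do_populate(), invoked
// from populate_fields() further down in this file, reads each entry above as
// {convert?, to: [{field, setter, prio?}]}. For a hypothetical captured pair
// {"hostname": "web01"}, the "hostname" entry writes "web01" to host.name through
// fld_prio with prio 0; a later {"host": "web01.internal"} (prio 1) can no longer
// displace it, because fld_prio only overwrites a holder whose stored prio number is
// higher. Entries such as "saddr" show the multi-target form: one captured value is
// converted once (to_ip) and then both set on source.ip and appended, de-duplicated,
// to the related.ip array via fld_append.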
"sinterface": {to:[{field: "observer.ingress.interface.name", setter: fld_set}]}, + "sld": {to:[{field: "url.registered_domain", setter: fld_set}]}, + "smacaddr": {convert: to_mac, to:[{field: "source.mac", setter: fld_set}]}, + "sport": {convert: to_long, to:[{field: "source.port", setter: fld_prio, prio: 0}]}, + "stransaddr": {convert: to_ip, to:[{field: "source.nat.ip", setter: fld_prio, prio: 0},{field: "related.ip", setter: fld_append}]}, + "stransport": {convert: to_long, to:[{field: "source.nat.port", setter: fld_prio, prio: 0}]}, + "tcp.dstport": {convert: to_long, to:[{field: "destination.port", setter: fld_prio, prio: 2}]}, + "tcp.srcport": {convert: to_long, to:[{field: "source.port", setter: fld_prio, prio: 2}]}, + "timezone": {to:[{field: "event.timezone", setter: fld_set}]}, + "tld": {to:[{field: "url.top_level_domain", setter: fld_prio, prio: 0}]}, + "udp.dstport": {convert: to_long, to:[{field: "destination.port", setter: fld_prio, prio: 3}]}, + "udp.srcport": {convert: to_long, to:[{field: "source.port", setter: fld_prio, prio: 3}]}, + "uid": {to:[{field: "related.user", setter: fld_append},{field: "user.name", setter: fld_prio, prio: 3}]}, + "url": {to:[{field: "url.original", setter: fld_prio, prio: 1}]}, + "url_raw": {to:[{field: "url.original", setter: fld_prio, prio: 0}]}, + "urldomain": {to:[{field: "url.domain", setter: fld_prio, prio: 0}]}, + "urlquery": {to:[{field: "url.query", setter: fld_prio, prio: 0}]}, + "user": {to:[{field: "related.user", setter: fld_append},{field: "user.name", setter: fld_prio, prio: 0}]}, + "user.id": {to:[{field: "user.id", setter: fld_prio, prio: 1}]}, + "user_agent": {to:[{field: "user_agent.original", setter: fld_set}]}, + "user_fullname": {to:[{field: "user.full_name", setter: fld_prio, prio: 0}]}, + "user_id": {to:[{field: "user.id", setter: fld_prio, prio: 0}]}, + "username": {to:[{field: "related.user", setter: fld_append},{field: "user.name", setter: fld_prio, prio: 1}]}, + "version": {to:[{field: "observer.version", setter: fld_set}]}, + "web_domain": {to:[{field: "url.domain", setter: fld_prio, prio: 1},{field: "related.hosts", setter: fld_append}]}, + "web_extension": {to:[{field: "file.extension", setter: fld_prio, prio: 0}]}, + "web_query": {to:[{field: "url.query", setter: fld_prio, prio: 1}]}, + "web_ref_domain": {to:[{field: "related.hosts", setter: fld_append}]}, + "web_referer": {to:[{field: "http.request.referrer", setter: fld_prio, prio: 0}]}, + "web_root": {to:[{field: "url.path", setter: fld_set}]}, + "webpage": {to:[{field: "file.name", setter: fld_prio, prio: 1}]}, + }; + + var rsa_mappings = { + "access_point": {to:[{field: "rsa.wireless.access_point", setter: fld_set}]}, + "accesses": {to:[{field: "rsa.identity.accesses", setter: fld_set}]}, + "acl_id": {to:[{field: "rsa.misc.acl_id", setter: fld_set}]}, + "acl_op": {to:[{field: "rsa.misc.acl_op", setter: fld_set}]}, + "acl_pos": {to:[{field: "rsa.misc.acl_pos", setter: fld_set}]}, + "acl_table": {to:[{field: "rsa.misc.acl_table", setter: fld_set}]}, + "action": {to:[{field: "rsa.misc.action", setter: fld_append}]}, + "ad_computer_dst": {to:[{field: "rsa.network.ad_computer_dst", setter: fld_set}]}, + "addr": {to:[{field: "rsa.network.addr", setter: fld_set}]}, + "admin": {to:[{field: "rsa.misc.admin", setter: fld_set}]}, + "agent": {to:[{field: "rsa.misc.client", setter: fld_prio, prio: 0}]}, + "agent.id": {to:[{field: "rsa.misc.agent_id", setter: fld_set}]}, + "alarm_id": {to:[{field: "rsa.misc.alarm_id", setter: fld_set}]}, + "alarmname": {to:[{field: 
"rsa.misc.alarmname", setter: fld_set}]}, + "alert": {to:[{field: "rsa.threat.alert", setter: fld_set}]}, + "alert_id": {to:[{field: "rsa.misc.alert_id", setter: fld_set}]}, + "alias.host": {to:[{field: "rsa.network.alias_host", setter: fld_append}]}, + "analysis.file": {to:[{field: "rsa.investigations.analysis_file", setter: fld_set}]}, + "analysis.service": {to:[{field: "rsa.investigations.analysis_service", setter: fld_set}]}, + "analysis.session": {to:[{field: "rsa.investigations.analysis_session", setter: fld_set}]}, + "app_id": {to:[{field: "rsa.misc.app_id", setter: fld_set}]}, + "attachment": {to:[{field: "rsa.file.attachment", setter: fld_set}]}, + "audit": {to:[{field: "rsa.misc.audit", setter: fld_set}]}, + "audit_class": {to:[{field: "rsa.internal.audit_class", setter: fld_set}]}, + "audit_object": {to:[{field: "rsa.misc.audit_object", setter: fld_set}]}, + "auditdata": {to:[{field: "rsa.misc.auditdata", setter: fld_set}]}, + "authmethod": {to:[{field: "rsa.identity.auth_method", setter: fld_set}]}, + "autorun_type": {to:[{field: "rsa.misc.autorun_type", setter: fld_set}]}, + "bcc": {to:[{field: "rsa.email.email", setter: fld_append}]}, + "benchmark": {to:[{field: "rsa.misc.benchmark", setter: fld_set}]}, + "binary": {to:[{field: "rsa.file.binary", setter: fld_set}]}, + "boc": {to:[{field: "rsa.investigations.boc", setter: fld_set}]}, + "bssid": {to:[{field: "rsa.wireless.wlan_ssid", setter: fld_prio, prio: 1}]}, + "bypass": {to:[{field: "rsa.misc.bypass", setter: fld_set}]}, + "c_sid": {to:[{field: "rsa.identity.user_sid_src", setter: fld_set}]}, + "cache": {to:[{field: "rsa.misc.cache", setter: fld_set}]}, + "cache_hit": {to:[{field: "rsa.misc.cache_hit", setter: fld_set}]}, + "calling_from": {to:[{field: "rsa.misc.phone", setter: fld_prio, prio: 1}]}, + "calling_to": {to:[{field: "rsa.misc.phone", setter: fld_prio, prio: 0}]}, + "category": {to:[{field: "rsa.misc.category", setter: fld_set}]}, + "cc": {to:[{field: "rsa.email.email", setter: fld_append}]}, + "cc.number": {convert: to_long, to:[{field: "rsa.misc.cc_number", setter: fld_set}]}, + "cefversion": {to:[{field: "rsa.misc.cefversion", setter: fld_set}]}, + "cert.serial": {to:[{field: "rsa.crypto.cert_serial", setter: fld_set}]}, + "cert_ca": {to:[{field: "rsa.crypto.cert_ca", setter: fld_set}]}, + "cert_checksum": {to:[{field: "rsa.crypto.cert_checksum", setter: fld_set}]}, + "cert_common": {to:[{field: "rsa.crypto.cert_common", setter: fld_set}]}, + "cert_error": {to:[{field: "rsa.crypto.cert_error", setter: fld_set}]}, + "cert_hostname": {to:[{field: "rsa.crypto.cert_host_name", setter: fld_set}]}, + "cert_hostname_cat": {to:[{field: "rsa.crypto.cert_host_cat", setter: fld_set}]}, + "cert_issuer": {to:[{field: "rsa.crypto.cert_issuer", setter: fld_set}]}, + "cert_keysize": {to:[{field: "rsa.crypto.cert_keysize", setter: fld_set}]}, + "cert_status": {to:[{field: "rsa.crypto.cert_status", setter: fld_set}]}, + "cert_subject": {to:[{field: "rsa.crypto.cert_subject", setter: fld_set}]}, + "cert_username": {to:[{field: "rsa.crypto.cert_username", setter: fld_set}]}, + "cfg.attr": {to:[{field: "rsa.misc.cfg_attr", setter: fld_set}]}, + "cfg.obj": {to:[{field: "rsa.misc.cfg_obj", setter: fld_set}]}, + "cfg.path": {to:[{field: "rsa.misc.cfg_path", setter: fld_set}]}, + "change_attribute": {to:[{field: "rsa.misc.change_attrib", setter: fld_set}]}, + "change_new": {to:[{field: "rsa.misc.change_new", setter: fld_set}]}, + "change_old": {to:[{field: "rsa.misc.change_old", setter: fld_set}]}, + "changes": {to:[{field: 
"rsa.misc.changes", setter: fld_set}]}, + "checksum": {to:[{field: "rsa.misc.checksum", setter: fld_set}]}, + "checksum.dst": {to:[{field: "rsa.misc.checksum_dst", setter: fld_set}]}, + "checksum.src": {to:[{field: "rsa.misc.checksum_src", setter: fld_set}]}, + "cid": {to:[{field: "rsa.internal.cid", setter: fld_set}]}, + "client": {to:[{field: "rsa.misc.client", setter: fld_prio, prio: 1}]}, + "client_ip": {to:[{field: "rsa.misc.client_ip", setter: fld_set}]}, + "clustermembers": {to:[{field: "rsa.misc.clustermembers", setter: fld_set}]}, + "cmd": {to:[{field: "rsa.misc.cmd", setter: fld_set}]}, + "cn_acttimeout": {to:[{field: "rsa.misc.cn_acttimeout", setter: fld_set}]}, + "cn_asn_dst": {to:[{field: "rsa.web.cn_asn_dst", setter: fld_set}]}, + "cn_asn_src": {to:[{field: "rsa.misc.cn_asn_src", setter: fld_set}]}, + "cn_bgpv4nxthop": {to:[{field: "rsa.misc.cn_bgpv4nxthop", setter: fld_set}]}, + "cn_ctr_dst_code": {to:[{field: "rsa.misc.cn_ctr_dst_code", setter: fld_set}]}, + "cn_dst_tos": {to:[{field: "rsa.misc.cn_dst_tos", setter: fld_set}]}, + "cn_dst_vlan": {to:[{field: "rsa.misc.cn_dst_vlan", setter: fld_set}]}, + "cn_engine_id": {to:[{field: "rsa.misc.cn_engine_id", setter: fld_set}]}, + "cn_engine_type": {to:[{field: "rsa.misc.cn_engine_type", setter: fld_set}]}, + "cn_f_switch": {to:[{field: "rsa.misc.cn_f_switch", setter: fld_set}]}, + "cn_flowsampid": {to:[{field: "rsa.misc.cn_flowsampid", setter: fld_set}]}, + "cn_flowsampintv": {to:[{field: "rsa.misc.cn_flowsampintv", setter: fld_set}]}, + "cn_flowsampmode": {to:[{field: "rsa.misc.cn_flowsampmode", setter: fld_set}]}, + "cn_inacttimeout": {to:[{field: "rsa.misc.cn_inacttimeout", setter: fld_set}]}, + "cn_inpermbyts": {to:[{field: "rsa.misc.cn_inpermbyts", setter: fld_set}]}, + "cn_inpermpckts": {to:[{field: "rsa.misc.cn_inpermpckts", setter: fld_set}]}, + "cn_invalid": {to:[{field: "rsa.misc.cn_invalid", setter: fld_set}]}, + "cn_ip_proto_ver": {to:[{field: "rsa.misc.cn_ip_proto_ver", setter: fld_set}]}, + "cn_ipv4_ident": {to:[{field: "rsa.misc.cn_ipv4_ident", setter: fld_set}]}, + "cn_l_switch": {to:[{field: "rsa.misc.cn_l_switch", setter: fld_set}]}, + "cn_log_did": {to:[{field: "rsa.misc.cn_log_did", setter: fld_set}]}, + "cn_log_rid": {to:[{field: "rsa.misc.cn_log_rid", setter: fld_set}]}, + "cn_max_ttl": {to:[{field: "rsa.misc.cn_max_ttl", setter: fld_set}]}, + "cn_maxpcktlen": {to:[{field: "rsa.misc.cn_maxpcktlen", setter: fld_set}]}, + "cn_min_ttl": {to:[{field: "rsa.misc.cn_min_ttl", setter: fld_set}]}, + "cn_minpcktlen": {to:[{field: "rsa.misc.cn_minpcktlen", setter: fld_set}]}, + "cn_mpls_lbl_1": {to:[{field: "rsa.misc.cn_mpls_lbl_1", setter: fld_set}]}, + "cn_mpls_lbl_10": {to:[{field: "rsa.misc.cn_mpls_lbl_10", setter: fld_set}]}, + "cn_mpls_lbl_2": {to:[{field: "rsa.misc.cn_mpls_lbl_2", setter: fld_set}]}, + "cn_mpls_lbl_3": {to:[{field: "rsa.misc.cn_mpls_lbl_3", setter: fld_set}]}, + "cn_mpls_lbl_4": {to:[{field: "rsa.misc.cn_mpls_lbl_4", setter: fld_set}]}, + "cn_mpls_lbl_5": {to:[{field: "rsa.misc.cn_mpls_lbl_5", setter: fld_set}]}, + "cn_mpls_lbl_6": {to:[{field: "rsa.misc.cn_mpls_lbl_6", setter: fld_set}]}, + "cn_mpls_lbl_7": {to:[{field: "rsa.misc.cn_mpls_lbl_7", setter: fld_set}]}, + "cn_mpls_lbl_8": {to:[{field: "rsa.misc.cn_mpls_lbl_8", setter: fld_set}]}, + "cn_mpls_lbl_9": {to:[{field: "rsa.misc.cn_mpls_lbl_9", setter: fld_set}]}, + "cn_mplstoplabel": {to:[{field: "rsa.misc.cn_mplstoplabel", setter: fld_set}]}, + "cn_mplstoplabip": {to:[{field: "rsa.misc.cn_mplstoplabip", setter: fld_set}]}, + 
"cn_mul_dst_byt": {to:[{field: "rsa.misc.cn_mul_dst_byt", setter: fld_set}]}, + "cn_mul_dst_pks": {to:[{field: "rsa.misc.cn_mul_dst_pks", setter: fld_set}]}, + "cn_muligmptype": {to:[{field: "rsa.misc.cn_muligmptype", setter: fld_set}]}, + "cn_rpackets": {to:[{field: "rsa.web.cn_rpackets", setter: fld_set}]}, + "cn_sampalgo": {to:[{field: "rsa.misc.cn_sampalgo", setter: fld_set}]}, + "cn_sampint": {to:[{field: "rsa.misc.cn_sampint", setter: fld_set}]}, + "cn_seqctr": {to:[{field: "rsa.misc.cn_seqctr", setter: fld_set}]}, + "cn_spackets": {to:[{field: "rsa.misc.cn_spackets", setter: fld_set}]}, + "cn_src_tos": {to:[{field: "rsa.misc.cn_src_tos", setter: fld_set}]}, + "cn_src_vlan": {to:[{field: "rsa.misc.cn_src_vlan", setter: fld_set}]}, + "cn_sysuptime": {to:[{field: "rsa.misc.cn_sysuptime", setter: fld_set}]}, + "cn_template_id": {to:[{field: "rsa.misc.cn_template_id", setter: fld_set}]}, + "cn_totbytsexp": {to:[{field: "rsa.misc.cn_totbytsexp", setter: fld_set}]}, + "cn_totflowexp": {to:[{field: "rsa.misc.cn_totflowexp", setter: fld_set}]}, + "cn_totpcktsexp": {to:[{field: "rsa.misc.cn_totpcktsexp", setter: fld_set}]}, + "cn_unixnanosecs": {to:[{field: "rsa.misc.cn_unixnanosecs", setter: fld_set}]}, + "cn_v6flowlabel": {to:[{field: "rsa.misc.cn_v6flowlabel", setter: fld_set}]}, + "cn_v6optheaders": {to:[{field: "rsa.misc.cn_v6optheaders", setter: fld_set}]}, + "code": {to:[{field: "rsa.misc.code", setter: fld_set}]}, + "command": {to:[{field: "rsa.misc.command", setter: fld_set}]}, + "comments": {to:[{field: "rsa.misc.comments", setter: fld_set}]}, + "comp_class": {to:[{field: "rsa.misc.comp_class", setter: fld_set}]}, + "comp_name": {to:[{field: "rsa.misc.comp_name", setter: fld_set}]}, + "comp_rbytes": {to:[{field: "rsa.misc.comp_rbytes", setter: fld_set}]}, + "comp_sbytes": {to:[{field: "rsa.misc.comp_sbytes", setter: fld_set}]}, + "component_version": {to:[{field: "rsa.misc.comp_version", setter: fld_set}]}, + "connection_id": {to:[{field: "rsa.misc.connection_id", setter: fld_prio, prio: 1}]}, + "connectionid": {to:[{field: "rsa.misc.connection_id", setter: fld_prio, prio: 0}]}, + "content": {to:[{field: "rsa.misc.content", setter: fld_set}]}, + "content_type": {to:[{field: "rsa.misc.content_type", setter: fld_set}]}, + "content_version": {to:[{field: "rsa.misc.content_version", setter: fld_set}]}, + "context": {to:[{field: "rsa.misc.context", setter: fld_set}]}, + "count": {to:[{field: "rsa.misc.count", setter: fld_set}]}, + "cpu": {convert: to_long, to:[{field: "rsa.misc.cpu", setter: fld_set}]}, + "cpu_data": {to:[{field: "rsa.misc.cpu_data", setter: fld_set}]}, + "criticality": {to:[{field: "rsa.misc.criticality", setter: fld_set}]}, + "cs_agency_dst": {to:[{field: "rsa.misc.cs_agency_dst", setter: fld_set}]}, + "cs_analyzedby": {to:[{field: "rsa.misc.cs_analyzedby", setter: fld_set}]}, + "cs_av_other": {to:[{field: "rsa.misc.cs_av_other", setter: fld_set}]}, + "cs_av_primary": {to:[{field: "rsa.misc.cs_av_primary", setter: fld_set}]}, + "cs_av_secondary": {to:[{field: "rsa.misc.cs_av_secondary", setter: fld_set}]}, + "cs_bgpv6nxthop": {to:[{field: "rsa.misc.cs_bgpv6nxthop", setter: fld_set}]}, + "cs_bit9status": {to:[{field: "rsa.misc.cs_bit9status", setter: fld_set}]}, + "cs_context": {to:[{field: "rsa.misc.cs_context", setter: fld_set}]}, + "cs_control": {to:[{field: "rsa.misc.cs_control", setter: fld_set}]}, + "cs_data": {to:[{field: "rsa.misc.cs_data", setter: fld_set}]}, + "cs_datecret": {to:[{field: "rsa.misc.cs_datecret", setter: fld_set}]}, + "cs_dst_tld": {to:[{field: 
"rsa.misc.cs_dst_tld", setter: fld_set}]}, + "cs_eth_dst_ven": {to:[{field: "rsa.misc.cs_eth_dst_ven", setter: fld_set}]}, + "cs_eth_src_ven": {to:[{field: "rsa.misc.cs_eth_src_ven", setter: fld_set}]}, + "cs_event_uuid": {to:[{field: "rsa.misc.cs_event_uuid", setter: fld_set}]}, + "cs_filetype": {to:[{field: "rsa.misc.cs_filetype", setter: fld_set}]}, + "cs_fld": {to:[{field: "rsa.misc.cs_fld", setter: fld_set}]}, + "cs_if_desc": {to:[{field: "rsa.misc.cs_if_desc", setter: fld_set}]}, + "cs_if_name": {to:[{field: "rsa.misc.cs_if_name", setter: fld_set}]}, + "cs_ip_next_hop": {to:[{field: "rsa.misc.cs_ip_next_hop", setter: fld_set}]}, + "cs_ipv4dstpre": {to:[{field: "rsa.misc.cs_ipv4dstpre", setter: fld_set}]}, + "cs_ipv4srcpre": {to:[{field: "rsa.misc.cs_ipv4srcpre", setter: fld_set}]}, + "cs_lifetime": {to:[{field: "rsa.misc.cs_lifetime", setter: fld_set}]}, + "cs_log_medium": {to:[{field: "rsa.misc.cs_log_medium", setter: fld_set}]}, + "cs_loginname": {to:[{field: "rsa.misc.cs_loginname", setter: fld_set}]}, + "cs_modulescore": {to:[{field: "rsa.misc.cs_modulescore", setter: fld_set}]}, + "cs_modulesign": {to:[{field: "rsa.misc.cs_modulesign", setter: fld_set}]}, + "cs_opswatresult": {to:[{field: "rsa.misc.cs_opswatresult", setter: fld_set}]}, + "cs_payload": {to:[{field: "rsa.misc.cs_payload", setter: fld_set}]}, + "cs_registrant": {to:[{field: "rsa.misc.cs_registrant", setter: fld_set}]}, + "cs_registrar": {to:[{field: "rsa.misc.cs_registrar", setter: fld_set}]}, + "cs_represult": {to:[{field: "rsa.misc.cs_represult", setter: fld_set}]}, + "cs_rpayload": {to:[{field: "rsa.misc.cs_rpayload", setter: fld_set}]}, + "cs_sampler_name": {to:[{field: "rsa.misc.cs_sampler_name", setter: fld_set}]}, + "cs_sourcemodule": {to:[{field: "rsa.misc.cs_sourcemodule", setter: fld_set}]}, + "cs_streams": {to:[{field: "rsa.misc.cs_streams", setter: fld_set}]}, + "cs_targetmodule": {to:[{field: "rsa.misc.cs_targetmodule", setter: fld_set}]}, + "cs_v6nxthop": {to:[{field: "rsa.misc.cs_v6nxthop", setter: fld_set}]}, + "cs_whois_server": {to:[{field: "rsa.misc.cs_whois_server", setter: fld_set}]}, + "cs_yararesult": {to:[{field: "rsa.misc.cs_yararesult", setter: fld_set}]}, + "cve": {to:[{field: "rsa.misc.cve", setter: fld_set}]}, + "d_certauth": {to:[{field: "rsa.crypto.d_certauth", setter: fld_set}]}, + "d_cipher": {to:[{field: "rsa.crypto.cipher_dst", setter: fld_set}]}, + "d_ciphersize": {convert: to_long, to:[{field: "rsa.crypto.cipher_size_dst", setter: fld_set}]}, + "d_sslver": {to:[{field: "rsa.crypto.ssl_ver_dst", setter: fld_set}]}, + "data": {to:[{field: "rsa.internal.data", setter: fld_set}]}, + "data_type": {to:[{field: "rsa.misc.data_type", setter: fld_set}]}, + "date": {to:[{field: "rsa.time.date", setter: fld_set}]}, + "datetime": {to:[{field: "rsa.time.datetime", setter: fld_set}]}, + "day": {to:[{field: "rsa.time.day", setter: fld_set}]}, + "db_id": {to:[{field: "rsa.db.db_id", setter: fld_set}]}, + "db_name": {to:[{field: "rsa.db.database", setter: fld_set}]}, + "db_pid": {convert: to_long, to:[{field: "rsa.db.db_pid", setter: fld_set}]}, + "dclass_counter1": {convert: to_long, to:[{field: "rsa.counters.dclass_c1", setter: fld_set}]}, + "dclass_counter1_string": {to:[{field: "rsa.counters.dclass_c1_str", setter: fld_set}]}, + "dclass_counter2": {convert: to_long, to:[{field: "rsa.counters.dclass_c2", setter: fld_set}]}, + "dclass_counter2_string": {to:[{field: "rsa.counters.dclass_c2_str", setter: fld_set}]}, + "dclass_counter3": {convert: to_long, to:[{field: "rsa.counters.dclass_c3", 
setter: fld_set}]}, + "dclass_counter3_string": {to:[{field: "rsa.counters.dclass_c3_str", setter: fld_set}]}, + "dclass_ratio1": {to:[{field: "rsa.counters.dclass_r1", setter: fld_set}]}, + "dclass_ratio1_string": {to:[{field: "rsa.counters.dclass_r1_str", setter: fld_set}]}, + "dclass_ratio2": {to:[{field: "rsa.counters.dclass_r2", setter: fld_set}]}, + "dclass_ratio2_string": {to:[{field: "rsa.counters.dclass_r2_str", setter: fld_set}]}, + "dclass_ratio3": {to:[{field: "rsa.counters.dclass_r3", setter: fld_set}]}, + "dclass_ratio3_string": {to:[{field: "rsa.counters.dclass_r3_str", setter: fld_set}]}, + "dead": {convert: to_long, to:[{field: "rsa.internal.dead", setter: fld_set}]}, + "description": {to:[{field: "rsa.misc.description", setter: fld_set}]}, + "detail": {to:[{field: "rsa.misc.event_desc", setter: fld_set}]}, + "device": {to:[{field: "rsa.misc.device_name", setter: fld_set}]}, + "device.class": {to:[{field: "rsa.internal.device_class", setter: fld_set}]}, + "device.group": {to:[{field: "rsa.internal.device_group", setter: fld_set}]}, + "device.host": {to:[{field: "rsa.internal.device_host", setter: fld_set}]}, + "device.ip": {convert: to_ip, to:[{field: "rsa.internal.device_ip", setter: fld_set}]}, + "device.ipv6": {convert: to_ip, to:[{field: "rsa.internal.device_ipv6", setter: fld_set}]}, + "device.type": {to:[{field: "rsa.internal.device_type", setter: fld_set}]}, + "device.type.id": {convert: to_long, to:[{field: "rsa.internal.device_type_id", setter: fld_set}]}, + "devicehostname": {to:[{field: "rsa.network.alias_host", setter: fld_append}]}, + "devvendor": {to:[{field: "rsa.misc.devvendor", setter: fld_set}]}, + "dhost": {to:[{field: "rsa.network.host_dst", setter: fld_set}]}, + "did": {to:[{field: "rsa.internal.did", setter: fld_set}]}, + "dinterface": {to:[{field: "rsa.network.dinterface", setter: fld_set}]}, + "directory.dst": {to:[{field: "rsa.file.directory_dst", setter: fld_set}]}, + "directory.src": {to:[{field: "rsa.file.directory_src", setter: fld_set}]}, + "disk_volume": {to:[{field: "rsa.storage.disk_volume", setter: fld_set}]}, + "disposition": {to:[{field: "rsa.misc.disposition", setter: fld_set}]}, + "distance": {to:[{field: "rsa.misc.distance", setter: fld_set}]}, + "dmask": {to:[{field: "rsa.network.dmask", setter: fld_set}]}, + "dn": {to:[{field: "rsa.identity.dn", setter: fld_set}]}, + "dns_a_record": {to:[{field: "rsa.network.dns_a_record", setter: fld_set}]}, + "dns_cname_record": {to:[{field: "rsa.network.dns_cname_record", setter: fld_set}]}, + "dns_id": {to:[{field: "rsa.network.dns_id", setter: fld_set}]}, + "dns_opcode": {to:[{field: "rsa.network.dns_opcode", setter: fld_set}]}, + "dns_ptr_record": {to:[{field: "rsa.network.dns_ptr_record", setter: fld_set}]}, + "dns_resp": {to:[{field: "rsa.network.dns_resp", setter: fld_set}]}, + "dns_type": {to:[{field: "rsa.network.dns_type", setter: fld_set}]}, + "doc_number": {convert: to_long, to:[{field: "rsa.misc.doc_number", setter: fld_set}]}, + "domain": {to:[{field: "rsa.network.domain", setter: fld_set}]}, + "domain1": {to:[{field: "rsa.network.domain1", setter: fld_set}]}, + "dst_dn": {to:[{field: "rsa.identity.dn_dst", setter: fld_set}]}, + "dst_payload": {to:[{field: "rsa.misc.payload_dst", setter: fld_set}]}, + "dst_spi": {to:[{field: "rsa.misc.spi_dst", setter: fld_set}]}, + "dst_zone": {to:[{field: "rsa.network.zone_dst", setter: fld_set}]}, + "dstburb": {to:[{field: "rsa.misc.dstburb", setter: fld_set}]}, + "duration": {convert: to_double, to:[{field: "rsa.time.duration_time", setter: 
fld_set}]}, + "duration_string": {to:[{field: "rsa.time.duration_str", setter: fld_set}]}, + "ec_activity": {to:[{field: "rsa.investigations.ec_activity", setter: fld_set}]}, + "ec_outcome": {to:[{field: "rsa.investigations.ec_outcome", setter: fld_set}]}, + "ec_subject": {to:[{field: "rsa.investigations.ec_subject", setter: fld_set}]}, + "ec_theme": {to:[{field: "rsa.investigations.ec_theme", setter: fld_set}]}, + "edomain": {to:[{field: "rsa.misc.edomain", setter: fld_set}]}, + "edomaub": {to:[{field: "rsa.misc.edomaub", setter: fld_set}]}, + "effective_time": {convert: to_date, to:[{field: "rsa.time.effective_time", setter: fld_set}]}, + "ein.number": {convert: to_long, to:[{field: "rsa.misc.ein_number", setter: fld_set}]}, + "email": {to:[{field: "rsa.email.email", setter: fld_append}]}, + "encryption_type": {to:[{field: "rsa.crypto.crypto", setter: fld_set}]}, + "endtime": {convert: to_date, to:[{field: "rsa.time.endtime", setter: fld_set}]}, + "entropy.req": {convert: to_long, to:[{field: "rsa.internal.entropy_req", setter: fld_set}]}, + "entropy.res": {convert: to_long, to:[{field: "rsa.internal.entropy_res", setter: fld_set}]}, + "entry": {to:[{field: "rsa.internal.entry", setter: fld_set}]}, + "eoc": {to:[{field: "rsa.investigations.eoc", setter: fld_set}]}, + "error": {to:[{field: "rsa.misc.error", setter: fld_set}]}, + "eth_type": {convert: to_long, to:[{field: "rsa.network.eth_type", setter: fld_set}]}, + "euid": {to:[{field: "rsa.misc.euid", setter: fld_set}]}, + "event.cat": {convert: to_long, to:[{field: "rsa.investigations.event_cat", setter: fld_prio, prio: 1}]}, + "event.cat.name": {to:[{field: "rsa.investigations.event_cat_name", setter: fld_prio, prio: 1}]}, + "event_cat": {convert: to_long, to:[{field: "rsa.investigations.event_cat", setter: fld_prio, prio: 0}]}, + "event_cat_name": {to:[{field: "rsa.investigations.event_cat_name", setter: fld_prio, prio: 0}]}, + "event_category": {to:[{field: "rsa.misc.event_category", setter: fld_set}]}, + "event_computer": {to:[{field: "rsa.misc.event_computer", setter: fld_set}]}, + "event_counter": {convert: to_long, to:[{field: "rsa.counters.event_counter", setter: fld_set}]}, + "event_description": {to:[{field: "rsa.internal.event_desc", setter: fld_set}]}, + "event_id": {to:[{field: "rsa.misc.event_id", setter: fld_set}]}, + "event_log": {to:[{field: "rsa.misc.event_log", setter: fld_set}]}, + "event_name": {to:[{field: "rsa.internal.event_name", setter: fld_set}]}, + "event_queue_time": {convert: to_date, to:[{field: "rsa.time.event_queue_time", setter: fld_set}]}, + "event_source": {to:[{field: "rsa.misc.event_source", setter: fld_set}]}, + "event_state": {to:[{field: "rsa.misc.event_state", setter: fld_set}]}, + "event_time": {convert: to_date, to:[{field: "rsa.time.event_time", setter: fld_set}]}, + "event_time_str": {to:[{field: "rsa.time.event_time_str", setter: fld_prio, prio: 1}]}, + "event_time_string": {to:[{field: "rsa.time.event_time_str", setter: fld_prio, prio: 0}]}, + "event_type": {to:[{field: "rsa.misc.event_type", setter: fld_set}]}, + "event_user": {to:[{field: "rsa.misc.event_user", setter: fld_set}]}, + "eventtime": {to:[{field: "rsa.time.eventtime", setter: fld_set}]}, + "expected_val": {to:[{field: "rsa.misc.expected_val", setter: fld_set}]}, + "expiration_time": {convert: to_date, to:[{field: "rsa.time.expire_time", setter: fld_set}]}, + "expiration_time_string": {to:[{field: "rsa.time.expire_time_str", setter: fld_set}]}, + "facility": {to:[{field: "rsa.misc.facility", setter: fld_set}]}, + 
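// Illustrative aside: entries carrying a convert function only yield a value when the
// conversion succeeds. to_long, defined further down, returns undefined for anything
// that does not parse as an integer within the +/- (2^53 - 1) safe range, so a
// hypothetical "ein.number" of "n/a" (or a 60-bit counter) never produces a bogus
// rsa.misc.ein_number; to_date and to_ip likewise return undefined on unparsable
// input rather than propagating garbage into the event.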
"facilityname": {to:[{field: "rsa.misc.facilityname", setter: fld_set}]}, + "faddr": {to:[{field: "rsa.network.faddr", setter: fld_set}]}, + "fcatnum": {to:[{field: "rsa.misc.fcatnum", setter: fld_set}]}, + "federated_idp": {to:[{field: "rsa.identity.federated_idp", setter: fld_set}]}, + "federated_sp": {to:[{field: "rsa.identity.federated_sp", setter: fld_set}]}, + "feed.category": {to:[{field: "rsa.internal.feed_category", setter: fld_set}]}, + "feed_desc": {to:[{field: "rsa.internal.feed_desc", setter: fld_set}]}, + "feed_name": {to:[{field: "rsa.internal.feed_name", setter: fld_set}]}, + "fhost": {to:[{field: "rsa.network.fhost", setter: fld_set}]}, + "file_entropy": {convert: to_double, to:[{field: "rsa.file.file_entropy", setter: fld_set}]}, + "file_vendor": {to:[{field: "rsa.file.file_vendor", setter: fld_set}]}, + "filename_dst": {to:[{field: "rsa.file.filename_dst", setter: fld_set}]}, + "filename_src": {to:[{field: "rsa.file.filename_src", setter: fld_set}]}, + "filename_tmp": {to:[{field: "rsa.file.filename_tmp", setter: fld_set}]}, + "filesystem": {to:[{field: "rsa.file.filesystem", setter: fld_set}]}, + "filter": {to:[{field: "rsa.misc.filter", setter: fld_set}]}, + "finterface": {to:[{field: "rsa.misc.finterface", setter: fld_set}]}, + "flags": {to:[{field: "rsa.misc.flags", setter: fld_set}]}, + "forensic_info": {to:[{field: "rsa.misc.forensic_info", setter: fld_set}]}, + "forward.ip": {convert: to_ip, to:[{field: "rsa.internal.forward_ip", setter: fld_set}]}, + "forward.ipv6": {convert: to_ip, to:[{field: "rsa.internal.forward_ipv6", setter: fld_set}]}, + "found": {to:[{field: "rsa.misc.found", setter: fld_set}]}, + "fport": {to:[{field: "rsa.network.fport", setter: fld_set}]}, + "fqdn": {to:[{field: "rsa.web.fqdn", setter: fld_set}]}, + "fresult": {convert: to_long, to:[{field: "rsa.misc.fresult", setter: fld_set}]}, + "from": {to:[{field: "rsa.email.email_src", setter: fld_set}]}, + "gaddr": {to:[{field: "rsa.misc.gaddr", setter: fld_set}]}, + "gateway": {to:[{field: "rsa.network.gateway", setter: fld_set}]}, + "gmtdate": {to:[{field: "rsa.time.gmtdate", setter: fld_set}]}, + "gmttime": {to:[{field: "rsa.time.gmttime", setter: fld_set}]}, + "group": {to:[{field: "rsa.misc.group", setter: fld_set}]}, + "group_object": {to:[{field: "rsa.misc.group_object", setter: fld_set}]}, + "groupid": {to:[{field: "rsa.misc.group_id", setter: fld_set}]}, + "h_code": {to:[{field: "rsa.internal.hcode", setter: fld_set}]}, + "hardware_id": {to:[{field: "rsa.misc.hardware_id", setter: fld_set}]}, + "header.id": {to:[{field: "rsa.internal.header_id", setter: fld_set}]}, + "host.orig": {to:[{field: "rsa.network.host_orig", setter: fld_set}]}, + "host.state": {to:[{field: "rsa.endpoint.host_state", setter: fld_set}]}, + "host.type": {to:[{field: "rsa.network.host_type", setter: fld_set}]}, + "host_role": {to:[{field: "rsa.identity.host_role", setter: fld_set}]}, + "hostid": {to:[{field: "rsa.network.alias_host", setter: fld_append}]}, + "hostname": {to:[{field: "rsa.network.alias_host", setter: fld_append}]}, + "hour": {to:[{field: "rsa.time.hour", setter: fld_set}]}, + "https.insact": {to:[{field: "rsa.crypto.https_insact", setter: fld_set}]}, + "https.valid": {to:[{field: "rsa.crypto.https_valid", setter: fld_set}]}, + "icmpcode": {convert: to_long, to:[{field: "rsa.network.icmp_code", setter: fld_set}]}, + "icmptype": {convert: to_long, to:[{field: "rsa.network.icmp_type", setter: fld_set}]}, + "id": {to:[{field: "rsa.misc.reference_id", setter: fld_set}]}, + "id1": {to:[{field: 
"rsa.misc.reference_id1", setter: fld_set}]}, + "id2": {to:[{field: "rsa.misc.reference_id2", setter: fld_set}]}, + "id3": {to:[{field: "rsa.misc.id3", setter: fld_set}]}, + "ike": {to:[{field: "rsa.crypto.ike", setter: fld_set}]}, + "ike_cookie1": {to:[{field: "rsa.crypto.ike_cookie1", setter: fld_set}]}, + "ike_cookie2": {to:[{field: "rsa.crypto.ike_cookie2", setter: fld_set}]}, + "im_buddyid": {to:[{field: "rsa.misc.im_buddyid", setter: fld_set}]}, + "im_buddyname": {to:[{field: "rsa.misc.im_buddyname", setter: fld_set}]}, + "im_client": {to:[{field: "rsa.misc.im_client", setter: fld_set}]}, + "im_croomid": {to:[{field: "rsa.misc.im_croomid", setter: fld_set}]}, + "im_croomtype": {to:[{field: "rsa.misc.im_croomtype", setter: fld_set}]}, + "im_members": {to:[{field: "rsa.misc.im_members", setter: fld_set}]}, + "im_userid": {to:[{field: "rsa.misc.im_userid", setter: fld_set}]}, + "im_username": {to:[{field: "rsa.misc.im_username", setter: fld_set}]}, + "index": {to:[{field: "rsa.misc.index", setter: fld_set}]}, + "info": {to:[{field: "rsa.db.index", setter: fld_set}]}, + "inode": {convert: to_long, to:[{field: "rsa.internal.inode", setter: fld_set}]}, + "inout": {to:[{field: "rsa.misc.inout", setter: fld_set}]}, + "instance": {to:[{field: "rsa.db.instance", setter: fld_set}]}, + "interface": {to:[{field: "rsa.network.interface", setter: fld_set}]}, + "inv.category": {to:[{field: "rsa.investigations.inv_category", setter: fld_set}]}, + "inv.context": {to:[{field: "rsa.investigations.inv_context", setter: fld_set}]}, + "ioc": {to:[{field: "rsa.investigations.ioc", setter: fld_set}]}, + "ip_proto": {convert: to_long, to:[{field: "rsa.network.ip_proto", setter: fld_set}]}, + "ipkt": {to:[{field: "rsa.misc.ipkt", setter: fld_set}]}, + "ipscat": {to:[{field: "rsa.misc.ipscat", setter: fld_set}]}, + "ipspri": {to:[{field: "rsa.misc.ipspri", setter: fld_set}]}, + "jobname": {to:[{field: "rsa.misc.jobname", setter: fld_set}]}, + "jobnum": {to:[{field: "rsa.misc.job_num", setter: fld_set}]}, + "laddr": {to:[{field: "rsa.network.laddr", setter: fld_set}]}, + "language": {to:[{field: "rsa.misc.language", setter: fld_set}]}, + "latitude": {to:[{field: "rsa.misc.latitude", setter: fld_set}]}, + "lc.cid": {to:[{field: "rsa.internal.lc_cid", setter: fld_set}]}, + "lc.ctime": {convert: to_date, to:[{field: "rsa.internal.lc_ctime", setter: fld_set}]}, + "ldap": {to:[{field: "rsa.identity.ldap", setter: fld_set}]}, + "ldap.query": {to:[{field: "rsa.identity.ldap_query", setter: fld_set}]}, + "ldap.response": {to:[{field: "rsa.identity.ldap_response", setter: fld_set}]}, + "level": {convert: to_long, to:[{field: "rsa.internal.level", setter: fld_set}]}, + "lhost": {to:[{field: "rsa.network.lhost", setter: fld_set}]}, + "library": {to:[{field: "rsa.misc.library", setter: fld_set}]}, + "lifetime": {convert: to_long, to:[{field: "rsa.misc.lifetime", setter: fld_set}]}, + "linenum": {to:[{field: "rsa.misc.linenum", setter: fld_set}]}, + "link": {to:[{field: "rsa.misc.link", setter: fld_set}]}, + "linterface": {to:[{field: "rsa.network.linterface", setter: fld_set}]}, + "list_name": {to:[{field: "rsa.misc.list_name", setter: fld_set}]}, + "listnum": {to:[{field: "rsa.misc.listnum", setter: fld_set}]}, + "load_data": {to:[{field: "rsa.misc.load_data", setter: fld_set}]}, + "location_floor": {to:[{field: "rsa.misc.location_floor", setter: fld_set}]}, + "location_mark": {to:[{field: "rsa.misc.location_mark", setter: fld_set}]}, + "log_id": {to:[{field: "rsa.misc.log_id", setter: fld_set}]}, + "log_type": 
{to:[{field: "rsa.misc.log_type", setter: fld_set}]}, + "logid": {to:[{field: "rsa.misc.logid", setter: fld_set}]}, + "logip": {to:[{field: "rsa.misc.logip", setter: fld_set}]}, + "logname": {to:[{field: "rsa.misc.logname", setter: fld_set}]}, + "logon_type": {to:[{field: "rsa.identity.logon_type", setter: fld_set}]}, + "logon_type_desc": {to:[{field: "rsa.identity.logon_type_desc", setter: fld_set}]}, + "longitude": {to:[{field: "rsa.misc.longitude", setter: fld_set}]}, + "lport": {to:[{field: "rsa.misc.lport", setter: fld_set}]}, + "lread": {convert: to_long, to:[{field: "rsa.db.lread", setter: fld_set}]}, + "lun": {to:[{field: "rsa.storage.lun", setter: fld_set}]}, + "lwrite": {convert: to_long, to:[{field: "rsa.db.lwrite", setter: fld_set}]}, + "macaddr": {convert: to_mac, to:[{field: "rsa.network.eth_host", setter: fld_set}]}, + "mail_id": {to:[{field: "rsa.misc.mail_id", setter: fld_set}]}, + "mask": {to:[{field: "rsa.network.mask", setter: fld_set}]}, + "match": {to:[{field: "rsa.misc.match", setter: fld_set}]}, + "mbug_data": {to:[{field: "rsa.misc.mbug_data", setter: fld_set}]}, + "mcb.req": {convert: to_long, to:[{field: "rsa.internal.mcb_req", setter: fld_set}]}, + "mcb.res": {convert: to_long, to:[{field: "rsa.internal.mcb_res", setter: fld_set}]}, + "mcbc.req": {convert: to_long, to:[{field: "rsa.internal.mcbc_req", setter: fld_set}]}, + "mcbc.res": {convert: to_long, to:[{field: "rsa.internal.mcbc_res", setter: fld_set}]}, + "medium": {convert: to_long, to:[{field: "rsa.internal.medium", setter: fld_set}]}, + "message": {to:[{field: "rsa.internal.message", setter: fld_set}]}, + "message_body": {to:[{field: "rsa.misc.message_body", setter: fld_set}]}, + "messageid": {to:[{field: "rsa.internal.messageid", setter: fld_set}]}, + "min": {to:[{field: "rsa.time.min", setter: fld_set}]}, + "misc": {to:[{field: "rsa.misc.misc", setter: fld_set}]}, + "misc_name": {to:[{field: "rsa.misc.misc_name", setter: fld_set}]}, + "mode": {to:[{field: "rsa.misc.mode", setter: fld_set}]}, + "month": {to:[{field: "rsa.time.month", setter: fld_set}]}, + "msg": {to:[{field: "rsa.internal.msg", setter: fld_set}]}, + "msgIdPart1": {to:[{field: "rsa.misc.msgIdPart1", setter: fld_set}]}, + "msgIdPart2": {to:[{field: "rsa.misc.msgIdPart2", setter: fld_set}]}, + "msgIdPart3": {to:[{field: "rsa.misc.msgIdPart3", setter: fld_set}]}, + "msgIdPart4": {to:[{field: "rsa.misc.msgIdPart4", setter: fld_set}]}, + "msg_id": {to:[{field: "rsa.internal.msg_id", setter: fld_set}]}, + "msg_type": {to:[{field: "rsa.misc.msg_type", setter: fld_set}]}, + "msgid": {to:[{field: "rsa.misc.msgid", setter: fld_set}]}, + "name": {to:[{field: "rsa.misc.name", setter: fld_set}]}, + "netname": {to:[{field: "rsa.network.netname", setter: fld_set}]}, + "netsessid": {to:[{field: "rsa.misc.netsessid", setter: fld_set}]}, + "network_port": {convert: to_long, to:[{field: "rsa.network.network_port", setter: fld_set}]}, + "network_service": {to:[{field: "rsa.network.network_service", setter: fld_set}]}, + "node": {to:[{field: "rsa.misc.node", setter: fld_set}]}, + "nodename": {to:[{field: "rsa.internal.node_name", setter: fld_set}]}, + "ntype": {to:[{field: "rsa.misc.ntype", setter: fld_set}]}, + "num": {to:[{field: "rsa.misc.num", setter: fld_set}]}, + "number": {to:[{field: "rsa.misc.number", setter: fld_set}]}, + "number1": {to:[{field: "rsa.misc.number1", setter: fld_set}]}, + "number2": {to:[{field: "rsa.misc.number2", setter: fld_set}]}, + "nwe.callback_id": {to:[{field: "rsa.internal.nwe_callback_id", setter: fld_set}]}, + "nwwn": 
{to:[{field: "rsa.misc.nwwn", setter: fld_set}]}, + "obj_id": {to:[{field: "rsa.internal.obj_id", setter: fld_set}]}, + "obj_name": {to:[{field: "rsa.misc.obj_name", setter: fld_set}]}, + "obj_server": {to:[{field: "rsa.internal.obj_server", setter: fld_set}]}, + "obj_type": {to:[{field: "rsa.misc.obj_type", setter: fld_set}]}, + "obj_value": {to:[{field: "rsa.internal.obj_val", setter: fld_set}]}, + "object": {to:[{field: "rsa.misc.object", setter: fld_set}]}, + "observed_val": {to:[{field: "rsa.misc.observed_val", setter: fld_set}]}, + "operation": {to:[{field: "rsa.misc.operation", setter: fld_set}]}, + "operation_id": {to:[{field: "rsa.misc.operation_id", setter: fld_set}]}, + "opkt": {to:[{field: "rsa.misc.opkt", setter: fld_set}]}, + "org.dst": {to:[{field: "rsa.physical.org_dst", setter: fld_prio, prio: 1}]}, + "org.src": {to:[{field: "rsa.physical.org_src", setter: fld_set}]}, + "org_dst": {to:[{field: "rsa.physical.org_dst", setter: fld_prio, prio: 0}]}, + "orig_from": {to:[{field: "rsa.misc.orig_from", setter: fld_set}]}, + "origin": {to:[{field: "rsa.network.origin", setter: fld_set}]}, + "original_owner": {to:[{field: "rsa.identity.owner", setter: fld_set}]}, + "os": {to:[{field: "rsa.misc.OS", setter: fld_set}]}, + "owner_id": {to:[{field: "rsa.misc.owner_id", setter: fld_set}]}, + "p_action": {to:[{field: "rsa.misc.p_action", setter: fld_set}]}, + "p_date": {to:[{field: "rsa.time.p_date", setter: fld_set}]}, + "p_filter": {to:[{field: "rsa.misc.p_filter", setter: fld_set}]}, + "p_group_object": {to:[{field: "rsa.misc.p_group_object", setter: fld_set}]}, + "p_id": {to:[{field: "rsa.misc.p_id", setter: fld_set}]}, + "p_month": {to:[{field: "rsa.time.p_month", setter: fld_set}]}, + "p_msgid": {to:[{field: "rsa.misc.p_msgid", setter: fld_set}]}, + "p_msgid1": {to:[{field: "rsa.misc.p_msgid1", setter: fld_set}]}, + "p_msgid2": {to:[{field: "rsa.misc.p_msgid2", setter: fld_set}]}, + "p_result1": {to:[{field: "rsa.misc.p_result1", setter: fld_set}]}, + "p_time": {to:[{field: "rsa.time.p_time", setter: fld_set}]}, + "p_time1": {to:[{field: "rsa.time.p_time1", setter: fld_set}]}, + "p_time2": {to:[{field: "rsa.time.p_time2", setter: fld_set}]}, + "p_url": {to:[{field: "rsa.web.p_url", setter: fld_set}]}, + "p_user_agent": {to:[{field: "rsa.web.p_user_agent", setter: fld_set}]}, + "p_web_cookie": {to:[{field: "rsa.web.p_web_cookie", setter: fld_set}]}, + "p_web_method": {to:[{field: "rsa.web.p_web_method", setter: fld_set}]}, + "p_web_referer": {to:[{field: "rsa.web.p_web_referer", setter: fld_set}]}, + "p_year": {to:[{field: "rsa.time.p_year", setter: fld_set}]}, + "packet_length": {to:[{field: "rsa.network.packet_length", setter: fld_set}]}, + "paddr": {convert: to_ip, to:[{field: "rsa.network.paddr", setter: fld_set}]}, + "param": {to:[{field: "rsa.misc.param", setter: fld_set}]}, + "param.dst": {to:[{field: "rsa.misc.param_dst", setter: fld_set}]}, + "param.src": {to:[{field: "rsa.misc.param_src", setter: fld_set}]}, + "parent_node": {to:[{field: "rsa.misc.parent_node", setter: fld_set}]}, + "parse.error": {to:[{field: "rsa.internal.parse_error", setter: fld_set}]}, + "password": {to:[{field: "rsa.identity.password", setter: fld_set}]}, + "password_chg": {to:[{field: "rsa.misc.password_chg", setter: fld_set}]}, + "password_expire": {to:[{field: "rsa.misc.password_expire", setter: fld_set}]}, + "patient_fname": {to:[{field: "rsa.healthcare.patient_fname", setter: fld_set}]}, + "patient_id": {to:[{field: "rsa.healthcare.patient_id", setter: fld_set}]}, + "patient_lname": {to:[{field: 
"rsa.healthcare.patient_lname", setter: fld_set}]}, + "patient_mname": {to:[{field: "rsa.healthcare.patient_mname", setter: fld_set}]}, + "payload.req": {convert: to_long, to:[{field: "rsa.internal.payload_req", setter: fld_set}]}, + "payload.res": {convert: to_long, to:[{field: "rsa.internal.payload_res", setter: fld_set}]}, + "peer": {to:[{field: "rsa.crypto.peer", setter: fld_set}]}, + "peer_id": {to:[{field: "rsa.crypto.peer_id", setter: fld_set}]}, + "permgranted": {to:[{field: "rsa.misc.permgranted", setter: fld_set}]}, + "permissions": {to:[{field: "rsa.db.permissions", setter: fld_set}]}, + "permwanted": {to:[{field: "rsa.misc.permwanted", setter: fld_set}]}, + "pgid": {to:[{field: "rsa.misc.pgid", setter: fld_set}]}, + "phone_number": {to:[{field: "rsa.misc.phone", setter: fld_prio, prio: 2}]}, + "phost": {to:[{field: "rsa.network.phost", setter: fld_set}]}, + "pid": {to:[{field: "rsa.misc.pid", setter: fld_set}]}, + "policy": {to:[{field: "rsa.misc.policy", setter: fld_set}]}, + "policyUUID": {to:[{field: "rsa.misc.policyUUID", setter: fld_set}]}, + "policy_id": {to:[{field: "rsa.misc.policy_id", setter: fld_set}]}, + "policy_value": {to:[{field: "rsa.misc.policy_value", setter: fld_set}]}, + "policy_waiver": {to:[{field: "rsa.misc.policy_waiver", setter: fld_set}]}, + "policyname": {to:[{field: "rsa.misc.policy_name", setter: fld_prio, prio: 0}]}, + "pool_id": {to:[{field: "rsa.misc.pool_id", setter: fld_set}]}, + "pool_name": {to:[{field: "rsa.misc.pool_name", setter: fld_set}]}, + "port": {convert: to_long, to:[{field: "rsa.network.port", setter: fld_set}]}, + "portname": {to:[{field: "rsa.misc.port_name", setter: fld_set}]}, + "pread": {convert: to_long, to:[{field: "rsa.db.pread", setter: fld_set}]}, + "priority": {to:[{field: "rsa.misc.priority", setter: fld_set}]}, + "privilege": {to:[{field: "rsa.file.privilege", setter: fld_set}]}, + "process.vid.dst": {to:[{field: "rsa.internal.process_vid_dst", setter: fld_set}]}, + "process.vid.src": {to:[{field: "rsa.internal.process_vid_src", setter: fld_set}]}, + "process_id_val": {to:[{field: "rsa.misc.process_id_val", setter: fld_set}]}, + "processing_time": {to:[{field: "rsa.time.process_time", setter: fld_set}]}, + "profile": {to:[{field: "rsa.identity.profile", setter: fld_set}]}, + "prog_asp_num": {to:[{field: "rsa.misc.prog_asp_num", setter: fld_set}]}, + "program": {to:[{field: "rsa.misc.program", setter: fld_set}]}, + "protocol_detail": {to:[{field: "rsa.network.protocol_detail", setter: fld_set}]}, + "pwwn": {to:[{field: "rsa.storage.pwwn", setter: fld_set}]}, + "r_hostid": {to:[{field: "rsa.network.alias_host", setter: fld_append}]}, + "real_data": {to:[{field: "rsa.misc.real_data", setter: fld_set}]}, + "realm": {to:[{field: "rsa.identity.realm", setter: fld_set}]}, + "reason": {to:[{field: "rsa.misc.reason", setter: fld_set}]}, + "rec_asp_device": {to:[{field: "rsa.misc.rec_asp_device", setter: fld_set}]}, + "rec_asp_num": {to:[{field: "rsa.misc.rec_asp_num", setter: fld_set}]}, + "rec_library": {to:[{field: "rsa.misc.rec_library", setter: fld_set}]}, + "recorded_time": {convert: to_date, to:[{field: "rsa.time.recorded_time", setter: fld_set}]}, + "recordnum": {to:[{field: "rsa.misc.recordnum", setter: fld_set}]}, + "registry.key": {to:[{field: "rsa.endpoint.registry_key", setter: fld_set}]}, + "registry.value": {to:[{field: "rsa.endpoint.registry_value", setter: fld_set}]}, + "remote_domain": {to:[{field: "rsa.web.remote_domain", setter: fld_set}]}, + "remote_domain_id": {to:[{field: "rsa.network.remote_domain_id", 
setter: fld_set}]}, + "reputation_num": {convert: to_double, to:[{field: "rsa.web.reputation_num", setter: fld_set}]}, + "resource": {to:[{field: "rsa.internal.resource", setter: fld_set}]}, + "resource_class": {to:[{field: "rsa.internal.resource_class", setter: fld_set}]}, + "result": {to:[{field: "rsa.misc.result", setter: fld_set}]}, + "result_code": {to:[{field: "rsa.misc.result_code", setter: fld_prio, prio: 1}]}, + "resultcode": {to:[{field: "rsa.misc.result_code", setter: fld_prio, prio: 0}]}, + "rid": {convert: to_long, to:[{field: "rsa.internal.rid", setter: fld_set}]}, + "risk": {to:[{field: "rsa.misc.risk", setter: fld_set}]}, + "risk_info": {to:[{field: "rsa.misc.risk_info", setter: fld_set}]}, + "risk_num": {convert: to_double, to:[{field: "rsa.misc.risk_num", setter: fld_set}]}, + "risk_num_comm": {convert: to_double, to:[{field: "rsa.misc.risk_num_comm", setter: fld_set}]}, + "risk_num_next": {convert: to_double, to:[{field: "rsa.misc.risk_num_next", setter: fld_set}]}, + "risk_num_sand": {convert: to_double, to:[{field: "rsa.misc.risk_num_sand", setter: fld_set}]}, + "risk_num_static": {convert: to_double, to:[{field: "rsa.misc.risk_num_static", setter: fld_set}]}, + "risk_suspicious": {to:[{field: "rsa.misc.risk_suspicious", setter: fld_set}]}, + "risk_warning": {to:[{field: "rsa.misc.risk_warning", setter: fld_set}]}, + "rpayload": {to:[{field: "rsa.network.rpayload", setter: fld_set}]}, + "ruid": {to:[{field: "rsa.misc.ruid", setter: fld_set}]}, + "rule": {to:[{field: "rsa.misc.rule", setter: fld_set}]}, + "rule_group": {to:[{field: "rsa.misc.rule_group", setter: fld_set}]}, + "rule_template": {to:[{field: "rsa.misc.rule_template", setter: fld_set}]}, + "rule_uid": {to:[{field: "rsa.misc.rule_uid", setter: fld_set}]}, + "rulename": {to:[{field: "rsa.misc.rule_name", setter: fld_set}]}, + "s_certauth": {to:[{field: "rsa.crypto.s_certauth", setter: fld_set}]}, + "s_cipher": {to:[{field: "rsa.crypto.cipher_src", setter: fld_set}]}, + "s_ciphersize": {convert: to_long, to:[{field: "rsa.crypto.cipher_size_src", setter: fld_set}]}, + "s_context": {to:[{field: "rsa.misc.context_subject", setter: fld_set}]}, + "s_sslver": {to:[{field: "rsa.crypto.ssl_ver_src", setter: fld_set}]}, + "sburb": {to:[{field: "rsa.misc.sburb", setter: fld_set}]}, + "scheme": {to:[{field: "rsa.crypto.scheme", setter: fld_set}]}, + "sdomain_fld": {to:[{field: "rsa.misc.sdomain_fld", setter: fld_set}]}, + "search.text": {to:[{field: "rsa.misc.search_text", setter: fld_set}]}, + "sec": {to:[{field: "rsa.misc.sec", setter: fld_set}]}, + "second": {to:[{field: "rsa.misc.second", setter: fld_set}]}, + "sensor": {to:[{field: "rsa.misc.sensor", setter: fld_set}]}, + "sensorname": {to:[{field: "rsa.misc.sensorname", setter: fld_set}]}, + "seqnum": {to:[{field: "rsa.misc.seqnum", setter: fld_set}]}, + "serial_number": {to:[{field: "rsa.misc.serial_number", setter: fld_set}]}, + "service.account": {to:[{field: "rsa.identity.service_account", setter: fld_set}]}, + "session": {to:[{field: "rsa.misc.session", setter: fld_set}]}, + "session.split": {to:[{field: "rsa.internal.session_split", setter: fld_set}]}, + "sessionid": {to:[{field: "rsa.misc.log_session_id", setter: fld_set}]}, + "sessionid1": {to:[{field: "rsa.misc.log_session_id1", setter: fld_set}]}, + "sessiontype": {to:[{field: "rsa.misc.sessiontype", setter: fld_set}]}, + "severity": {to:[{field: "rsa.misc.severity", setter: fld_set}]}, + "sid": {to:[{field: "rsa.identity.user_sid_dst", setter: fld_set}]}, + "sig.name": {to:[{field: "rsa.misc.sig_name", 
setter: fld_set}]}, + "sigUUID": {to:[{field: "rsa.misc.sigUUID", setter: fld_set}]}, + "sigcat": {to:[{field: "rsa.misc.sigcat", setter: fld_set}]}, + "sigid": {convert: to_long, to:[{field: "rsa.misc.sig_id", setter: fld_set}]}, + "sigid1": {convert: to_long, to:[{field: "rsa.misc.sig_id1", setter: fld_set}]}, + "sigid_string": {to:[{field: "rsa.misc.sig_id_str", setter: fld_set}]}, + "signame": {to:[{field: "rsa.misc.policy_name", setter: fld_prio, prio: 1}]}, + "sigtype": {to:[{field: "rsa.crypto.sig_type", setter: fld_set}]}, + "sinterface": {to:[{field: "rsa.network.sinterface", setter: fld_set}]}, + "site": {to:[{field: "rsa.internal.site", setter: fld_set}]}, + "size": {convert: to_long, to:[{field: "rsa.internal.size", setter: fld_set}]}, + "smask": {to:[{field: "rsa.network.smask", setter: fld_set}]}, + "snmp.oid": {to:[{field: "rsa.misc.snmp_oid", setter: fld_set}]}, + "snmp.value": {to:[{field: "rsa.misc.snmp_value", setter: fld_set}]}, + "sourcefile": {to:[{field: "rsa.internal.sourcefile", setter: fld_set}]}, + "space": {to:[{field: "rsa.misc.space", setter: fld_set}]}, + "space1": {to:[{field: "rsa.misc.space1", setter: fld_set}]}, + "spi": {to:[{field: "rsa.misc.spi", setter: fld_set}]}, + "sql": {to:[{field: "rsa.misc.sql", setter: fld_set}]}, + "src_dn": {to:[{field: "rsa.identity.dn_src", setter: fld_set}]}, + "src_payload": {to:[{field: "rsa.misc.payload_src", setter: fld_set}]}, + "src_spi": {to:[{field: "rsa.misc.spi_src", setter: fld_set}]}, + "src_zone": {to:[{field: "rsa.network.zone_src", setter: fld_set}]}, + "srcburb": {to:[{field: "rsa.misc.srcburb", setter: fld_set}]}, + "srcdom": {to:[{field: "rsa.misc.srcdom", setter: fld_set}]}, + "srcservice": {to:[{field: "rsa.misc.srcservice", setter: fld_set}]}, + "ssid": {to:[{field: "rsa.wireless.wlan_ssid", setter: fld_prio, prio: 0}]}, + "stamp": {convert: to_date, to:[{field: "rsa.time.stamp", setter: fld_set}]}, + "starttime": {convert: to_date, to:[{field: "rsa.time.starttime", setter: fld_set}]}, + "state": {to:[{field: "rsa.misc.state", setter: fld_set}]}, + "statement": {to:[{field: "rsa.internal.statement", setter: fld_set}]}, + "status": {to:[{field: "rsa.misc.status", setter: fld_set}]}, + "status1": {to:[{field: "rsa.misc.status1", setter: fld_set}]}, + "streams": {convert: to_long, to:[{field: "rsa.misc.streams", setter: fld_set}]}, + "subcategory": {to:[{field: "rsa.misc.subcategory", setter: fld_set}]}, + "subject": {to:[{field: "rsa.email.subject", setter: fld_set}]}, + "svcno": {to:[{field: "rsa.misc.svcno", setter: fld_set}]}, + "system": {to:[{field: "rsa.misc.system", setter: fld_set}]}, + "t_context": {to:[{field: "rsa.misc.context_target", setter: fld_set}]}, + "task_name": {to:[{field: "rsa.file.task_name", setter: fld_set}]}, + "tbdstr1": {to:[{field: "rsa.misc.tbdstr1", setter: fld_set}]}, + "tbdstr2": {to:[{field: "rsa.misc.tbdstr2", setter: fld_set}]}, + "tbl_name": {to:[{field: "rsa.db.table_name", setter: fld_set}]}, + "tcp_flags": {convert: to_long, to:[{field: "rsa.misc.tcp_flags", setter: fld_set}]}, + "terminal": {to:[{field: "rsa.misc.terminal", setter: fld_set}]}, + "tgtdom": {to:[{field: "rsa.misc.tgtdom", setter: fld_set}]}, + "tgtdomain": {to:[{field: "rsa.misc.tgtdomain", setter: fld_set}]}, + "threat_name": {to:[{field: "rsa.threat.threat_category", setter: fld_set}]}, + "threat_source": {to:[{field: "rsa.threat.threat_source", setter: fld_set}]}, + "threat_val": {to:[{field: "rsa.threat.threat_desc", setter: fld_set}]}, + "threshold": {to:[{field: "rsa.misc.threshold", setter: 
fld_set}]}, + "time": {convert: to_date, to:[{field: "rsa.internal.time", setter: fld_set}]}, + "timestamp": {to:[{field: "rsa.time.timestamp", setter: fld_set}]}, + "timezone": {to:[{field: "rsa.time.timezone", setter: fld_set}]}, + "to": {to:[{field: "rsa.email.email_dst", setter: fld_set}]}, + "tos": {convert: to_long, to:[{field: "rsa.misc.tos", setter: fld_set}]}, + "trans_from": {to:[{field: "rsa.email.trans_from", setter: fld_set}]}, + "trans_id": {to:[{field: "rsa.db.transact_id", setter: fld_set}]}, + "trans_to": {to:[{field: "rsa.email.trans_to", setter: fld_set}]}, + "trigger_desc": {to:[{field: "rsa.misc.trigger_desc", setter: fld_set}]}, + "trigger_val": {to:[{field: "rsa.misc.trigger_val", setter: fld_set}]}, + "type": {to:[{field: "rsa.misc.type", setter: fld_set}]}, + "type1": {to:[{field: "rsa.misc.type1", setter: fld_set}]}, + "tzone": {to:[{field: "rsa.time.tzone", setter: fld_set}]}, + "ubc.req": {convert: to_long, to:[{field: "rsa.internal.ubc_req", setter: fld_set}]}, + "ubc.res": {convert: to_long, to:[{field: "rsa.internal.ubc_res", setter: fld_set}]}, + "udb_class": {to:[{field: "rsa.misc.udb_class", setter: fld_set}]}, + "url_fld": {to:[{field: "rsa.misc.url_fld", setter: fld_set}]}, + "urlpage": {to:[{field: "rsa.web.urlpage", setter: fld_set}]}, + "urlroot": {to:[{field: "rsa.web.urlroot", setter: fld_set}]}, + "user_address": {to:[{field: "rsa.email.email", setter: fld_append}]}, + "user_dept": {to:[{field: "rsa.identity.user_dept", setter: fld_set}]}, + "user_div": {to:[{field: "rsa.misc.user_div", setter: fld_set}]}, + "user_fname": {to:[{field: "rsa.identity.firstname", setter: fld_set}]}, + "user_lname": {to:[{field: "rsa.identity.lastname", setter: fld_set}]}, + "user_mname": {to:[{field: "rsa.identity.middlename", setter: fld_set}]}, + "user_org": {to:[{field: "rsa.identity.org", setter: fld_set}]}, + "user_role": {to:[{field: "rsa.identity.user_role", setter: fld_set}]}, + "userid": {to:[{field: "rsa.misc.userid", setter: fld_set}]}, + "username_fld": {to:[{field: "rsa.misc.username_fld", setter: fld_set}]}, + "utcstamp": {to:[{field: "rsa.misc.utcstamp", setter: fld_set}]}, + "v_instafname": {to:[{field: "rsa.misc.v_instafname", setter: fld_set}]}, + "vendor_event_cat": {to:[{field: "rsa.investigations.event_vcat", setter: fld_set}]}, + "version": {to:[{field: "rsa.misc.version", setter: fld_set}]}, + "vid": {to:[{field: "rsa.internal.msg_vid", setter: fld_set}]}, + "virt_data": {to:[{field: "rsa.misc.virt_data", setter: fld_set}]}, + "virusname": {to:[{field: "rsa.misc.virusname", setter: fld_set}]}, + "vlan": {convert: to_long, to:[{field: "rsa.network.vlan", setter: fld_set}]}, + "vlan.name": {to:[{field: "rsa.network.vlan_name", setter: fld_set}]}, + "vm_target": {to:[{field: "rsa.misc.vm_target", setter: fld_set}]}, + "vpnid": {to:[{field: "rsa.misc.vpnid", setter: fld_set}]}, + "vsys": {to:[{field: "rsa.misc.vsys", setter: fld_set}]}, + "vuln_ref": {to:[{field: "rsa.misc.vuln_ref", setter: fld_set}]}, + "web_cookie": {to:[{field: "rsa.web.web_cookie", setter: fld_set}]}, + "web_extension_tmp": {to:[{field: "rsa.web.web_extension_tmp", setter: fld_set}]}, + "web_host": {to:[{field: "rsa.web.alias_host", setter: fld_set}]}, + "web_method": {to:[{field: "rsa.misc.action", setter: fld_append}]}, + "web_page": {to:[{field: "rsa.web.web_page", setter: fld_set}]}, + "web_ref_domain": {to:[{field: "rsa.web.web_ref_domain", setter: fld_set}]}, + "web_ref_host": {to:[{field: "rsa.network.alias_host", setter: fld_append}]}, + "web_ref_page": {to:[{field: 
"rsa.web.web_ref_page", setter: fld_set}]}, + "web_ref_query": {to:[{field: "rsa.web.web_ref_query", setter: fld_set}]}, + "web_ref_root": {to:[{field: "rsa.web.web_ref_root", setter: fld_set}]}, + "wifi_channel": {convert: to_long, to:[{field: "rsa.wireless.wlan_channel", setter: fld_set}]}, + "wlan": {to:[{field: "rsa.wireless.wlan_name", setter: fld_set}]}, + "word": {to:[{field: "rsa.internal.word", setter: fld_set}]}, + "workspace_desc": {to:[{field: "rsa.misc.workspace", setter: fld_set}]}, + "workstation": {to:[{field: "rsa.network.alias_host", setter: fld_append}]}, + "year": {to:[{field: "rsa.time.year", setter: fld_set}]}, + "zone": {to:[{field: "rsa.network.zone", setter: fld_set}]}, + }; + + function to_date(value) { + switch (typeof (value)) { + case "object": + // This is a Date. But as it was obtained from evt.Get(), the VM + // doesn't see it as a JS Date anymore, thus value instanceof Date === false. + // Have to trust that any object here is a valid Date for Go. + return value; + case "string": + var asDate = new Date(value); + if (!isNaN(asDate)) return asDate; + } + } + + // ECMAScript 5.1 doesn't have Object.MAX_SAFE_INTEGER / Object.MIN_SAFE_INTEGER. + var maxSafeInt = Math.pow(2, 53) - 1; + var minSafeInt = -maxSafeInt; + + function to_long(value) { + var num = parseInt(value); + // Better not to index a number if it's not safe (above 53 bits). + return !isNaN(num) && minSafeInt <= num && num <= maxSafeInt ? num : undefined; + } + + function to_ip(value) { + if (value.indexOf(":") === -1) + return to_ipv4(value); + return to_ipv6(value); + } + + var ipv4_regex = /^(\d+)\.(\d+)\.(\d+)\.(\d+)$/; + var ipv6_hex_regex = /^[0-9A-Fa-f]{1,4}$/; + + function to_ipv4(value) { + var result = ipv4_regex.exec(value); + if (result == null || result.length !== 5) return; + for (var i = 1; i < 5; i++) { + var num = strictToInt(result[i]); + if (isNaN(num) || num < 0 || num > 255) return; + } + return value; + } + + function to_ipv6(value) { + var sqEnd = value.indexOf("]"); + if (sqEnd > -1) { + if (value.charAt(0) !== "[") return; + value = value.substr(1, sqEnd - 1); + } + var zoneOffset = value.indexOf("%"); + if (zoneOffset > -1) { + value = value.substr(0, zoneOffset); + } + var parts = value.split(":"); + if (parts == null || parts.length < 3 || parts.length > 8) return; + var numEmpty = 0; + var innerEmpty = 0; + for (var i = 0; i < parts.length; i++) { + if (parts[i].length === 0) { + numEmpty++; + if (i > 0 && i + 1 < parts.length) innerEmpty++; + } else if (!parts[i].match(ipv6_hex_regex) && + // Accept an IPv6 with a valid IPv4 at the end. + ((i + 1 < parts.length) || !to_ipv4(parts[i]))) { + return; + } + } + return innerEmpty === 0 && parts.length === 8 || innerEmpty === 1 ? value : undefined; + } + + function to_double(value) { + return parseFloat(value); + } + + function to_mac(value) { + // ES doesn't have a mac datatype so it's safe to ingest whatever was captured. + return value; + } + + function to_lowercase(value) { + // to_lowercase is used against keyword fields, which can accept + // any other type (numbers, dates). + return typeof(value) === "string"? 
value.toLowerCase() : value; + } + + function fld_set(dst, value) { + dst[this.field] = { v: value }; + } + + function fld_append(dst, value) { + if (dst[this.field] === undefined) { + dst[this.field] = { v: [value] }; + } else { + var base = dst[this.field]; + if (base.v.indexOf(value)===-1) base.v.push(value); + } + } + + function fld_prio(dst, value) { + if (dst[this.field] === undefined) { + dst[this.field] = { v: value, prio: this.prio}; + } else if(this.prio < dst[this.field].prio) { + dst[this.field].v = value; + dst[this.field].prio = this.prio; + } + } + + var valid_ecs_outcome = { + 'failure': true, + 'success': true, + 'unknown': true + }; + + function fld_ecs_outcome(dst, value) { + value = value.toLowerCase(); + if (valid_ecs_outcome[value] === undefined) { + value = 'unknown'; + } + if (dst[this.field] === undefined) { + dst[this.field] = { v: value }; + } else if (dst[this.field].v === 'unknown') { + dst[this.field] = { v: value }; + } + } + + function map_all(evt, targets, value) { + for (var i = 0; i < targets.length; i++) { + evt.Put(targets[i], value); + } + } + + function populate_fields(evt) { + var base = evt.Get(FIELDS_OBJECT); + if (base === null) return; + alternate_datetime(evt); + if (map_ecs) { + do_populate(evt, base, ecs_mappings); + } + if (map_rsa) { + do_populate(evt, base, rsa_mappings); + } + if (keep_raw) { + evt.Put("rsa.raw", base); + } + evt.Delete(FIELDS_OBJECT); + } + + var datetime_alt_components = [ + {field: "day", fmts: [[dF]]}, + {field: "year", fmts: [[dW]]}, + {field: "month", fmts: [[dB],[dG]]}, + {field: "date", fmts: [[dW,dSkip,dG,dSkip,dF],[dW,dSkip,dB,dSkip,dF],[dW,dSkip,dR,dSkip,dF]]}, + {field: "hour", fmts: [[dN]]}, + {field: "min", fmts: [[dU]]}, + {field: "secs", fmts: [[dO]]}, + {field: "time", fmts: [[dN, dSkip, dU, dSkip, dO]]}, + ]; + + function alternate_datetime(evt) { + if (evt.Get(FIELDS_PREFIX + "event_time") != null) { + return; + } + var tzOffset = tz_offset; + if (tzOffset === "event") { + tzOffset = evt.Get("event.timezone"); + } + var container = new DateContainer(tzOffset); + for (var i=0; i} %{timezone}]||%{web_method}||%{web_host}||%{webpage}||%{web_query}||%{network_service}||%{resultcode}||%{sbytes}||%{web_referer}||%{user_agent}||%{web_cookie}", processor_chain([ + dup1, + dup2, + dup3, + dup4, + dup5, + dup6, + ])); + + var hdr1 = match("HEADER#0:0001", "message", "%APACHETOMCAT-%{level}-%{messageid}: %{payload}", processor_chain([ + setc("header_id","0001"), + ])); + + var hdr2 = match("HEADER#1:0002", "message", "%{hmonth->} %{hday->} %{htime->} %{hostname->} %APACHETOMCAT- %{messageid}: %{payload}", processor_chain([ + setc("header_id","0002"), + ])); + + var select1 = linear_select([ + hdr1, + hdr2, + ]); + + var msg1 = msg("ABCD", dup7); + + var msg2 = msg("BADMETHOD", dup7); + + var msg3 = msg("BADMTHD", dup7); + + var msg4 = msg("BDMTHD", dup7); + + var msg5 = msg("INDEX", dup7); + + var msg6 = msg("CFYZ", dup7); + + var msg7 = msg("CONNECT", dup7); + + var msg8 = msg("DELETE", dup7); + + var msg9 = msg("DETECT_METHOD_TYPE", dup7); + + var msg10 = msg("FGET", dup7); + + var msg11 = msg("GET", dup7); + + var msg12 = msg("get", dup7); + + var msg13 = msg("HEAD", dup7); + + var msg14 = msg("id", dup7); + + var msg15 = msg("LOCK", dup7); + + var msg16 = msg("MKCOL", dup7); + + var msg17 = msg("NCIRCLE", dup7); + + var msg18 = msg("OPTIONS", dup7); + + var msg19 = msg("POST", dup7); + + var msg20 = msg("PRONECT", dup7); + + var msg21 = msg("PROPFIND", dup7); + + var msg22 = msg("PUT", dup7); + + var msg23 = 
msg("QUALYS", dup7); + + var msg24 = msg("SEARCH", dup7); + + var msg25 = msg("TRACK", dup7); + + var msg26 = msg("TRACE", dup7); + + var msg27 = msg("uGET", dup7); + + var msg28 = msg("null", dup7); + + var msg29 = msg("rndmmtd", dup7); + + var msg30 = msg("RNDMMTD", dup7); + + var msg31 = msg("asdf", dup7); + + var msg32 = msg("DEBUG", dup7); + + var msg33 = msg("COOK", dup7); + + var msg34 = msg("nGET", dup7); + + var chain1 = processor_chain([ + select1, + msgid_select({ + "ABCD": msg1, + "BADMETHOD": msg2, + "BADMTHD": msg3, + "BDMTHD": msg4, + "CFYZ": msg6, + "CONNECT": msg7, + "COOK": msg33, + "DEBUG": msg32, + "DELETE": msg8, + "DETECT_METHOD_TYPE": msg9, + "FGET": msg10, + "GET": msg11, + "HEAD": msg13, + "INDEX": msg5, + "LOCK": msg15, + "MKCOL": msg16, + "NCIRCLE": msg17, + "OPTIONS": msg18, + "POST": msg19, + "PRONECT": msg20, + "PROPFIND": msg21, + "PUT": msg22, + "QUALYS": msg23, + "RNDMMTD": msg30, + "SEARCH": msg24, + "TRACE": msg26, + "TRACK": msg25, + "asdf": msg31, + "get": msg12, + "id": msg14, + "nGET": msg34, + "null": msg28, + "rndmmtd": msg29, + "uGET": msg27, + }), + ]); + + var part1 = match("MESSAGE#0:ABCD", "nwparser.payload", "%{saddr}||%{fld5}||%{username}||[%{fld7->} %{timezone}]||%{web_method}||%{web_host}||%{webpage}||%{web_query}||%{network_service}||%{resultcode}||%{sbytes}||%{web_referer}||%{user_agent}||%{web_cookie}", processor_chain([ + dup1, + dup2, + dup3, + dup4, + dup5, + dup6, + ])); + - community_id: null + - registered_domain: + field: dns.question.name + ignore_failure: true + ignore_missing: true + target_etld_field: dns.question.top_level_domain + target_field: dns.question.registered_domain + target_subdomain_field: dns.question.subdomain + - registered_domain: + field: client.domain + ignore_failure: true + ignore_missing: true + target_etld_field: client.top_level_domain + target_field: client.registered_domain + target_subdomain_field: client.subdomain + - registered_domain: + field: server.domain + ignore_failure: true + ignore_missing: true + target_etld_field: server.top_level_domain + target_field: server.registered_domain + target_subdomain_field: server.subdomain + - registered_domain: + field: destination.domain + ignore_failure: true + ignore_missing: true + target_etld_field: destination.top_level_domain + target_field: destination.registered_domain + target_subdomain_field: destination.subdomain + - registered_domain: + field: source.domain + ignore_failure: true + ignore_missing: true + target_etld_field: source.top_level_domain + target_field: source.registered_domain + target_subdomain_field: source.subdomain + - registered_domain: + field: url.domain + ignore_failure: true + ignore_missing: true + target_etld_field: url.top_level_domain + target_field: url.registered_domain + target_subdomain_field: url.subdomain + - add_locale: null + tags: + - tomcat-log + - forwarded + udp: null + data_stream.namespace: default + - name: tcp-tomcat + type: tcp + use_output: default + streams: + - condition: ${kubernetes.hints.tomcat.log.enabled} == true or ${kubernetes.hints.tomcat.enabled} == true + data_stream: + dataset: tomcat.log + type: logs + fields: + observer: + product: TomCat + type: Web + vendor: Apache + fields_under_root: true + host: localhost:9523 + processors: + - script: + lang: javascript + params: + debug: false + ecs: true + keep_raw: false + rsa: true + tz_offset: local + source: | + // Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + // or more contributor license agreements. 
Licensed under the Elastic License; + // you may not use this file except in compliance with the Elastic License. + + /* jshint -W014,-W016,-W097,-W116 */ + + var processor = require("processor"); + var console = require("console"); + + var FLAG_FIELD = "log.flags"; + var FIELDS_OBJECT = "nwparser"; + var FIELDS_PREFIX = FIELDS_OBJECT + "."; + + var defaults = { + debug: false, + ecs: true, + rsa: false, + keep_raw: false, + tz_offset: "local", + strip_priority: true + }; + + var saved_flags = null; + var debug; + var map_ecs; + var map_rsa; + var keep_raw; + var device; + var tz_offset; + var strip_priority; + + // Register params from configuration. + function register(params) { + debug = params.debug !== undefined ? params.debug : defaults.debug; + map_ecs = params.ecs !== undefined ? params.ecs : defaults.ecs; + map_rsa = params.rsa !== undefined ? params.rsa : defaults.rsa; + keep_raw = params.keep_raw !== undefined ? params.keep_raw : defaults.keep_raw; + tz_offset = parse_tz_offset(params.tz_offset !== undefined? params.tz_offset : defaults.tz_offset); + strip_priority = params.strip_priority !== undefined? params.strip_priority : defaults.strip_priority; + device = new DeviceProcessor(); + } + + function parse_tz_offset(offset) { + var date; + var m; + switch(offset) { + // local uses the tz offset from the JS VM. + case "local": + date = new Date(); + // Reversing the sign as we want the offset from UTC, not to UTC. + return parse_local_tz_offset(-date.getTimezoneOffset()); + // event uses the tz offset from event.timezone (add_locale processor). + case "event": + return offset; + // Otherwise a tz offset in the form "[+-][0-9]{4}" is required. + default: + m = offset.match(/^([+\-])([0-9]{2}):?([0-9]{2})?$/); + if (m === null || m.length !== 4) { + throw("bad timezone offset: '" + offset + "'. Must have the form +HH:MM"); + } + return m[1] + m[2] + ":" + (m[3]!==undefined? m[3] : "00"); + } + } + + function parse_local_tz_offset(minutes) { + var neg = minutes < 0; + minutes = Math.abs(minutes); + var min = minutes % 60; + var hours = Math.floor(minutes / 60); + var pad2digit = function(n) { + if (n < 10) { return "0" + n;} + return "" + n; + }; + return (neg? "-" : "+") + pad2digit(hours) + ":" + pad2digit(min); + } + + function process(evt) { + // Function register is only called by the processor when `params` are set + // in the processor config. + if (device === undefined) { + register(defaults); + } + return device.process(evt); + } + + function processor_chain(subprocessors) { + var builder = new processor.Chain(); + subprocessors.forEach(builder.Add); + return builder.Build().Run; + } + + function linear_select(subprocessors) { + return function (evt) { + var flags = evt.Get(FLAG_FIELD); + var i; + for (i = 0; i < subprocessors.length; i++) { + evt.Delete(FLAG_FIELD); + if (debug) console.warn("linear_select trying entry " + i); + subprocessors[i](evt); + // Dissect processor succeeded?
+ if (evt.Get(FLAG_FIELD) == null) break; + if (debug) console.warn("linear_select failed entry " + i); + } + if (flags !== null) { + evt.Put(FLAG_FIELD, flags); + } + if (debug) { + if (i < subprocessors.length) { + console.warn("linear_select matched entry " + i); + } else { + console.warn("linear_select didn't match"); + } + } + }; + } + + function conditional(opt) { + return function(evt) { + if (opt.if(evt)) { + opt.then(evt); + } else if (opt.else) { + opt.else(evt); + } + }; + } + + var strip_syslog_priority = (function() { + var isEnabled = function() { return strip_priority === true; }; + var fetchPRI = field("_pri"); + var fetchPayload = field("payload"); + var removePayload = remove(["payload"]); + var cleanup = remove(["_pri", "payload"]); + var onMatch = function(evt) { + var pri, priStr = fetchPRI(evt); + if (priStr != null + && 0 < priStr.length && priStr.length < 4 + && !isNaN((pri = Number(priStr))) + && 0 <= pri && pri < 192) { + var severity = pri & 7, + facility = pri >> 3; + setc("_severity", "" + severity)(evt); + setc("_facility", "" + facility)(evt); + // Replace message with priority stripped. + evt.Put("message", fetchPayload(evt)); + removePayload(evt); + } else { + // not a valid syslog PRI, cleanup. + cleanup(evt); + } + }; + return conditional({ + if: isEnabled, + then: cleanup_flags(match( + "STRIP_PRI", + "message", + "<%{_pri}>%{payload}", + onMatch + )) + }); + })(); + + function match(id, src, pattern, on_success) { + var dissect = new processor.Dissect({ + field: src, + tokenizer: pattern, + target_prefix: FIELDS_OBJECT, + ignore_failure: true, + overwrite_keys: true, + trim_values: "right" + }); + return function (evt) { + var msg = evt.Get(src); + dissect.Run(evt); + var failed = evt.Get(FLAG_FIELD) != null; + if (debug) { + if (failed) { + console.debug("dissect fail: " + id + " field:" + src); + } else { + console.debug("dissect OK: " + id + " field:" + src); + } + console.debug(" expr: <<" + pattern + ">>"); + console.debug(" input: <<" + msg + ">>"); + } + if (on_success != null && !failed) { + on_success(evt); + } + }; + } + + function match_copy(id, src, dst, on_success) { + dst = FIELDS_PREFIX + dst; + if (dst === FIELDS_PREFIX || dst === src) { + return function (evt) { + if (debug) { + console.debug("noop OK: " + id + " field:" + src); + console.debug(" input: <<" + evt.Get(src) + ">>"); + } + if (on_success != null) on_success(evt); + } + } + return function (evt) { + var msg = evt.Get(src); + evt.Put(dst, msg); + if (debug) { + console.debug("copy OK: " + id + " field:" + src); + console.debug(" target: '" + dst + "'"); + console.debug(" input: <<" + msg + ">>"); + } + if (on_success != null) on_success(evt); + } + } + + function cleanup_flags(processor) { + return function(evt) { + processor(evt); + evt.Delete(FLAG_FIELD); + }; + } + + function all_match(opts) { + return function (evt) { + var i; + for (i = 0; i < opts.processors.length; i++) { + evt.Delete(FLAG_FIELD); + opts.processors[i](evt); + // Dissect processor succeeded? 
+ if (evt.Get(FLAG_FIELD) != null) { + if (debug) console.warn("all_match failure at " + i); + if (opts.on_failure != null) opts.on_failure(evt); + return; + } + if (debug) console.warn("all_match success at " + i); + } + if (opts.on_success != null) opts.on_success(evt); + }; + } + + function msgid_select(mapping) { + return function (evt) { + var msgid = evt.Get(FIELDS_PREFIX + "messageid"); + if (msgid == null) { + if (debug) console.warn("msgid_select: no messageid captured!"); + return; + } + var next = mapping[msgid]; + if (next === undefined) { + if (debug) console.warn("msgid_select: no mapping for messageid:" + msgid); + return; + } + if (debug) console.info("msgid_select: matched key=" + msgid); + return next(evt); + }; + } + + function msg(msg_id, match) { + return function (evt) { + match(evt); + if (evt.Get(FLAG_FIELD) == null) { + evt.Put(FIELDS_PREFIX + "msg_id1", msg_id); + } + }; + } + + var start; + + function save_flags(evt) { + saved_flags = evt.Get(FLAG_FIELD); + evt.Put("event.original", evt.Get("message")); + } + + function restore_flags(evt) { + if (saved_flags !== null) { + evt.Put(FLAG_FIELD, saved_flags); + } + evt.Delete("message"); + } + + function constant(value) { + return function (evt) { + return value; + }; + } + + function field(name) { + var fullname = FIELDS_PREFIX + name; + return function (evt) { + return evt.Get(fullname); + }; + } + + function STRCAT(args) { + var s = ""; + var i; + for (i = 0; i < args.length; i++) { + s += args[i]; + } + return s; + } + + // TODO: Implement + function DIRCHK(args) { + unimplemented("DIRCHK"); + } + + function strictToInt(str) { + return str * 1; + } + + function CALC(args) { + if (args.length !== 3) { + console.warn("skipped call to CALC with " + args.length + " arguments."); + return; + } + var a = strictToInt(args[0]); + var b = strictToInt(args[2]); + if (isNaN(a) || isNaN(b)) { + console.warn("failed evaluating CALC arguments a='" + args[0] + "' b='" + args[2] + "'."); + return; + } + var result; + switch (args[1]) { + case "+": + result = a + b; + break; + case "-": + result = a - b; + break; + case "*": + result = a * b; + break; + default: + // Only * and + seen in the parsers. + console.warn("unknown CALC operation '" + args[1] + "'."); + return; + } + // Always return a string + return result !== undefined ? "" + result : result; + } + + var quoteChars = "\"'`"; + function RMQ(args) { + if(args.length !== 1) { + console.warn("RMQ: only one argument expected"); + return; + } + var value = args[0].trim(); + var n = value.length; + var char; + return n > 1 + && (char=value.charAt(0)) === value.charAt(n-1) + && quoteChars.indexOf(char) !== -1? 
+ value.substr(1, n-2) + : value; + } + + function call(opts) { + var args = new Array(opts.args.length); + return function (evt) { + for (var i = 0; i < opts.args.length; i++) + if ((args[i] = opts.args[i](evt)) == null) return; + var result = opts.fn(args); + if (result != null) { + evt.Put(opts.dest, result); + } + }; + } + + function nop(evt) { + } + + function appendErrorMsg(evt, msg) { + var value = evt.Get("error.message"); + if (value == null) { + value = [msg]; + } else if (msg instanceof Array) { + value.push(msg); + } else { + value = [value, msg]; + } + evt.Put("error.message", value); + } + + function unimplemented(name) { + // appendErrorMsg() needs the event as its first argument, which is not + // available here; warn on the console instead to avoid a runtime error. + console.warn("unimplemented feature: " + name); + } + + function lookup(opts) { + return function (evt) { + var key = opts.key(evt); + if (key == null) return; + var value = opts.map.keyvaluepairs[key]; + if (value === undefined) { + value = opts.map.default; + } + if (value !== undefined) { + evt.Put(opts.dest, value(evt)); + } + }; + } + + function set(fields) { + return new processor.AddFields({ + target: FIELDS_OBJECT, + fields: fields, + }); + } + + function setf(dst, src) { + return function (evt) { + var val = evt.Get(FIELDS_PREFIX + src); + if (val != null) evt.Put(FIELDS_PREFIX + dst, val); + }; + } + + function setc(dst, value) { + return function (evt) { + evt.Put(FIELDS_PREFIX + dst, value); + }; + } + + function set_field(opts) { + return function (evt) { + var val = opts.value(evt); + if (val != null) evt.Put(opts.dest, val); + }; + } + + function dump(label) { + return function (evt) { + console.log("Dump of event at " + label + ": " + JSON.stringify(evt, null, "\t")); + }; + } + + function date_time_join_args(evt, arglist) { + var str = ""; + for (var i = 0; i < arglist.length; i++) { + var fname = FIELDS_PREFIX + arglist[i]; + var val = evt.Get(fname); + if (val != null) { + if (str !== "") str += " "; + str += val; + } else { + if (debug) console.warn("in date_time: input arg " + fname + " is not set"); + } + } + return str; + } + + function to2Digit(num) { + return num? (num < 10? "0" + num : num) : "00"; + } + + // Make two-digit dates 00-69 interpreted as 2000-2069 + // and dates 70-99 translated to 1970-1999. + var twoDigitYearEpoch = 70; + var twoDigitYearCentury = 2000; + + // This is to accept dates up to 2 days in the future, only used when + // no year is specified in a date. 2 days should be enough to account for + // time differences between systems and different tz offsets. + var maxFutureDelta = 2*24*60*60*1000; + + // DateContainer stores date fields and then converts those fields into + // a Date. Necessary because building a Date using its set() methods gives + // different results depending on the order of components. + function DateContainer(tzOffset) { + this.offset = tzOffset === undefined? "Z" : tzOffset; + } + + DateContainer.prototype = { + setYear: function(v) {this.year = v;}, + setMonth: function(v) {this.month = v;}, + setDay: function(v) {this.day = v;}, + setHours: function(v) {this.hours = v;}, + setMinutes: function(v) {this.minutes = v;}, + setSeconds: function(v) {this.seconds = v;}, + + setUNIX: function(v) {this.unix = v;}, + + set2DigitYear: function(v) { + this.year = v < twoDigitYearEpoch? twoDigitYearCentury + v : twoDigitYearCentury + v - 100; + }, + + toDate: function() { + if (this.unix !== undefined) { + return new Date(this.unix * 1000); + } + if (this.day === undefined || this.month === undefined) { + // Can't make a date from this.
+ return undefined; + } + if (this.year === undefined) { + // A date without a year. Set current year, or previous year + // if date would be in the future. + var now = new Date(); + this.year = now.getFullYear(); + var date = this.toDate(); + if (date.getTime() - now.getTime() > maxFutureDelta) { + date.setFullYear(now.getFullYear() - 1); + } + return date; + } + var MM = to2Digit(this.month); + var DD = to2Digit(this.day); + var hh = to2Digit(this.hours); + var mm = to2Digit(this.minutes); + var ss = to2Digit(this.seconds); + return new Date(this.year + "-" + MM + "-" + DD + "T" + hh + ":" + mm + ":" + ss + this.offset); + } + } + + function date_time_try_pattern(fmt, str, tzOffset) { + var date = new DateContainer(tzOffset); + var pos = date_time_try_pattern_at_pos(fmt, str, 0, date); + return pos !== undefined? date.toDate() : undefined; + } + + function date_time_try_pattern_at_pos(fmt, str, pos, date) { + var len = str.length; + for (var proc = 0; pos !== undefined && pos < len && proc < fmt.length; proc++) { + pos = fmt[proc](str, pos, date); + } + return pos; + } + + function date_time(opts) { + return function (evt) { + var tzOffset = opts.tz || tz_offset; + if (tzOffset === "event") { + tzOffset = evt.Get("event.timezone"); + } + var str = date_time_join_args(evt, opts.args); + for (var i = 0; i < opts.fmts.length; i++) { + var date = date_time_try_pattern(opts.fmts[i], str, tzOffset); + if (date !== undefined) { + evt.Put(FIELDS_PREFIX + opts.dest, date); + return; + } + } + if (debug) console.warn("in date_time: id=" + opts.id + " FAILED: " + str); + }; + } + + var uA = 60 * 60 * 24; + var uD = 60 * 60 * 24; + var uF = 60 * 60; + var uG = 60 * 60 * 24 * 30; + var uH = 60 * 60; + var uI = 60 * 60; + var uJ = 60 * 60 * 24; + var uM = 60 * 60 * 24 * 30; + var uN = 60 * 60; + var uO = 1; + var uS = 1; + var uT = 60; + var uU = 60; + var uc = dc; + + function duration(opts) { + return function(evt) { + var str = date_time_join_args(evt, opts.args); + for (var i = 0; i < opts.fmts.length; i++) { + var seconds = duration_try_pattern(opts.fmts[i], str); + if (seconds !== undefined) { + evt.Put(FIELDS_PREFIX + opts.dest, seconds); + return; + } + } + if (debug) console.warn("in duration: id=" + opts.id + " (s) FAILED: " + str); + }; + } + + function duration_try_pattern(fmt, str) { + var secs = 0; + var pos = 0; + for (var i=0; i [ month_id , how many chars to skip if month in long form ] + "Jan": [0, 4], + "Feb": [1, 5], + "Mar": [2, 2], + "Apr": [3, 2], + "May": [4, 0], + "Jun": [5, 1], + "Jul": [6, 1], + "Aug": [7, 3], + "Sep": [8, 6], + "Oct": [9, 4], + "Nov": [10, 5], + "Dec": [11, 4], + "jan": [0, 4], + "feb": [1, 5], + "mar": [2, 2], + "apr": [3, 2], + "may": [4, 0], + "jun": [5, 1], + "jul": [6, 1], + "aug": [7, 3], + "sep": [8, 6], + "oct": [9, 4], + "nov": [10, 5], + "dec": [11, 4], + }; + + // var dC = undefined; + var dR = dateMonthName(true); + var dB = dateMonthName(false); + var dM = dateFixedWidthNumber("M", 2, 1, 12, DateContainer.prototype.setMonth); + var dG = dateVariableWidthNumber("G", 1, 12, DateContainer.prototype.setMonth); + var dD = dateFixedWidthNumber("D", 2, 1, 31, DateContainer.prototype.setDay); + var dF = dateVariableWidthNumber("F", 1, 31, DateContainer.prototype.setDay); + var dH = dateFixedWidthNumber("H", 2, 0, 24, DateContainer.prototype.setHours); + var dI = dateVariableWidthNumber("I", 0, 24, DateContainer.prototype.setHours); // Accept hours >12 + var dN = dateVariableWidthNumber("N", 0, 24, DateContainer.prototype.setHours); + var dT = 
dateFixedWidthNumber("T", 2, 0, 59, DateContainer.prototype.setMinutes); + var dU = dateVariableWidthNumber("U", 0, 59, DateContainer.prototype.setMinutes); + var dP = parseAMPM; // AM|PM + var dQ = parseAMPM; // A.M.|P.M + var dS = dateFixedWidthNumber("S", 2, 0, 60, DateContainer.prototype.setSeconds); + var dO = dateVariableWidthNumber("O", 0, 60, DateContainer.prototype.setSeconds); + var dY = dateFixedWidthNumber("Y", 2, 0, 99, DateContainer.prototype.set2DigitYear); + var dW = dateFixedWidthNumber("W", 4, 1000, 9999, DateContainer.prototype.setYear); + var dZ = parseHMS; + var dX = dateVariableWidthNumber("X", 0, 0x10000000000, DateContainer.prototype.setUNIX); + + // parseAMPM parses "A.M", "AM", "P.M", "PM" from logs. + // Only works if this modifier appears after the hour has been read from logs + // which is always the case in the 300 devices. + function parseAMPM(str, pos, date) { + var n = str.length; + var start = skipws(str, pos); + if (start + 2 > n) return; + var head = str.substr(start, 2).toUpperCase(); + var isPM = false; + var skip = false; + switch (head) { + case "A.": + skip = true; + /* falls through */ + case "AM": + break; + case "P.": + skip = true; + /* falls through */ + case "PM": + isPM = true; + break; + default: + if (debug) console.warn("can't parse pos " + start + " as AM/PM: " + str + "(head:" + head + ")"); + return; + } + pos = start + 2; + if (skip) { + if (pos+2 > n || str.substr(pos, 2).toUpperCase() !== "M.") { + if (debug) console.warn("can't parse pos " + start + " as AM/PM: " + str + "(tail)"); + return; + } + pos += 2; + } + var hh = date.hours; + if (isPM) { + // Accept existing hour in 24h format. + if (hh < 12) hh += 12; + } else { + if (hh === 12) hh = 0; + } + date.setHours(hh); + return pos; + } + + function parseHMS(str, pos, date) { + return date_time_try_pattern_at_pos([dN, dc(":"), dU, dc(":"), dO], str, pos, date); + } + + function skipws(str, pos) { + for ( var n = str.length; + pos < n && str.charAt(pos) === " "; + pos++) + ; + return pos; + } + + function skipdigits(str, pos) { + var c; + for (var n = str.length; + pos < n && (c = str.charAt(pos)) >= "0" && c <= "9"; + pos++) + ; + return pos; + } + + function dSkip(str, pos, date) { + var chr; + for (;pos < str.length && (chr=str[pos])<'0' || chr>'9'; pos++) {} + return pos < str.length? pos : undefined; + } + + function dateVariableWidthNumber(fmtChar, min, max, setter) { + return function (str, pos, date) { + var start = skipws(str, pos); + pos = skipdigits(str, start); + var s = str.substr(start, pos - start); + var value = parseInt(s, 10); + if (value >= min && value <= max) { + setter.call(date, value); + return pos; + } + return; + }; + } + + function dateFixedWidthNumber(fmtChar, width, min, max, setter) { + return function (str, pos, date) { + pos = skipws(str, pos); + var n = str.length; + if (pos + width > n) return; + var s = str.substr(pos, width); + var value = parseInt(s, 10); + if (value >= min && value <= max) { + setter.call(date, value); + return pos + width; + } + return; + }; + } + + // Short month name (Jan..Dec). 
+ function dateMonthName(long) { + return function (str, pos, date) { + pos = skipws(str, pos); + var n = str.length; + if (pos + 3 > n) return; + var mon = str.substr(pos, 3); + var idx = shortMonths[mon]; + if (idx === undefined) { + idx = shortMonths[mon.toLowerCase()]; + } + if (idx === undefined) { + //console.warn("parsing date_time: '" + mon + "' is not a valid short month (%B)"); + return; + } + date.setMonth(idx[0]+1); + return pos + 3 + (long ? idx[1] : 0); + }; + } + + function url_wrapper(dst, src, fn) { + return function(evt) { + var value = evt.Get(FIELDS_PREFIX + src), result; + if (value != null && (result = fn(value))!== undefined) { + evt.Put(FIELDS_PREFIX + dst, result); + } else { + console.debug(fn.name + " failed for '" + value + "'"); + } + }; + } + + // The following regular expression for parsing URLs from: + // https://github.com/wizard04wsu/URI_Parsing + // + // The MIT License (MIT) + // + // Copyright (c) 2014 Andrew Harrison + // + // Permission is hereby granted, free of charge, to any person obtaining a copy of + // this software and associated documentation files (the "Software"), to deal in + // the Software without restriction, including without limitation the rights to + // use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + // the Software, and to permit persons to whom the Software is furnished to do so, + // subject to the following conditions: + // + // The above copyright notice and this permission notice shall be included in all + // copies or substantial portions of the Software. + // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + // FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + // COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + // IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + // CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + var uriRegExp = /^([a-z][a-z0-9+.\-]*):(?:\/\/((?:(?=((?:[a-z0-9\-._~!$&'()*+,;=:]|%[0-9A-F]{2})*))(\3)@)?(?=(\[[0-9A-F:.]{2,}\]|(?:[a-z0-9\-._~!$&'()*+,;=]|%[0-9A-F]{2})*))\5(?::(?=(\d*))\6)?)(\/(?=((?:[a-z0-9-._~!$&'()*+,;=:@\/]|%[0-9A-F]{2})*))\8)?|(\/?(?!\/)(?=((?:[a-z0-9-._~!$&'()*+,;=:@\/]|%[0-9A-F]{2})*))\10)?)(?:\?(?=((?:[a-z0-9-._~!$&'()*+,;=:@\/?]|%[0-9A-F]{2})*))\11)?(?:#(?=((?:[a-z0-9-._~!$&'()*+,;=:@\/?]|%[0-9A-F]{2})*))\12)?$/i; + + var uriScheme = 1; + var uriDomain = 5; + var uriPort = 6; + var uriPath = 7; + var uriPathAlt = 9; + var uriQuery = 11; + + function domain(dst, src) { + return url_wrapper(dst, src, extract_domain); + } + + function split_url(value) { + var m = value.match(uriRegExp); + if (m && m[uriDomain]) return m; + // Support input in the form "www.example.net/path", but not "/path". + m = ("null://" + value).match(uriRegExp); + if (m) return m; + } + + function extract_domain(value) { + var m = split_url(value); + if (m && m[uriDomain]) return m[uriDomain]; + } + + var extFromPage = /\.[^.]+$/; + function extract_ext(value) { + var page = extract_page(value); + if (page) { + var m = page.match(extFromPage); + if (m) return m[0]; + } + } + + function ext(dst, src) { + return url_wrapper(dst, src, extract_ext); + } + + function fqdn(dst, src) { + // TODO: fqdn and domain(eTLD+1) are currently the same. 
+ return domain(dst, src); + } + + var pageFromPathRegExp = /\/([^\/]+)$/; + var pageName = 1; + + function extract_page(value) { + value = extract_path(value); + if (!value) return undefined; + var m = value.match(pageFromPathRegExp); + if (m) return m[pageName]; + } + + function page(dst, src) { + return url_wrapper(dst, src, extract_page); + } + + function extract_path(value) { + var m = split_url(value); + return m? m[uriPath] || m[uriPathAlt] : undefined; + } + + function path(dst, src) { + return url_wrapper(dst, src, extract_path); + } + + // Map common schemes to their default port. + // port has to be a string (will be converted at a later stage). + var schemePort = { + "ftp": "21", + "ssh": "22", + "http": "80", + "https": "443", + }; + + function extract_port(value) { + var m = split_url(value); + if (!m) return undefined; + if (m[uriPort]) return m[uriPort]; + if (m[uriScheme]) { + return schemePort[m[uriScheme]]; + } + } + + function port(dst, src) { + return url_wrapper(dst, src, extract_port); + } + + function extract_query(value) { + var m = split_url(value); + if (m && m[uriQuery]) return m[uriQuery]; + } + + function query(dst, src) { + return url_wrapper(dst, src, extract_query); + } + + function extract_root(value) { + var m = split_url(value); + if (m && m[uriDomain] && m[uriDomain]) { + var scheme = m[uriScheme] && m[uriScheme] !== "null"? + m[uriScheme] + "://" : ""; + var port = m[uriPort]? ":" + m[uriPort] : ""; + return scheme + m[uriDomain] + port; + } + } + + function root(dst, src) { + return url_wrapper(dst, src, extract_root); + } + + function tagval(id, src, cfg, keys, on_success) { + var fail = function(evt) { + evt.Put(FLAG_FIELD, "tagval_parsing_error"); + } + if (cfg.kv_separator.length !== 1) { + throw("Invalid TAGVALMAP ValueDelimiter (must have 1 character)"); + } + var quotes_len = cfg.open_quote.length > 0 && cfg.close_quote.length > 0? 
+ cfg.open_quote.length + cfg.close_quote.length : 0; + var kv_regex = new RegExp('^([^' + cfg.kv_separator + ']*)*' + cfg.kv_separator + ' *(.*)*$'); + return function(evt) { + var msg = evt.Get(src); + if (msg === undefined) { + console.warn("tagval: input field is missing"); + return fail(evt); + } + var pairs = msg.split(cfg.pair_separator); + var i; + var success = false; + var prev = ""; + for (i=0; i 0 && + value.length >= cfg.open_quote.length + cfg.close_quote.length && + value.substr(0, cfg.open_quote.length) === cfg.open_quote && + value.substr(value.length - cfg.close_quote.length) === cfg.close_quote) { + value = value.substr(cfg.open_quote.length, value.length - quotes_len); + } + evt.Put(FIELDS_PREFIX + field, value); + success = true; + } + if (!success) { + return fail(evt); + } + if (on_success != null) { + on_success(evt); + } + } + } + + var ecs_mappings = { + "_facility": {convert: to_long, to:[{field: "log.syslog.facility.code", setter: fld_set}]}, + "_pri": {convert: to_long, to:[{field: "log.syslog.priority", setter: fld_set}]}, + "_severity": {convert: to_long, to:[{field: "log.syslog.severity.code", setter: fld_set}]}, + "action": {to:[{field: "event.action", setter: fld_prio, prio: 0}]}, + "administrator": {to:[{field: "related.user", setter: fld_append},{field: "user.name", setter: fld_prio, prio: 4}]}, + "alias.ip": {convert: to_ip, to:[{field: "host.ip", setter: fld_prio, prio: 3},{field: "related.ip", setter: fld_append}]}, + "alias.ipv6": {convert: to_ip, to:[{field: "host.ip", setter: fld_prio, prio: 4},{field: "related.ip", setter: fld_append}]}, + "alias.mac": {convert: to_mac, to:[{field: "host.mac", setter: fld_prio, prio: 1}]}, + "application": {to:[{field: "network.application", setter: fld_set}]}, + "bytes": {convert: to_long, to:[{field: "network.bytes", setter: fld_set}]}, + "c_domain": {to:[{field: "source.domain", setter: fld_prio, prio: 1}]}, + "c_logon_id": {to:[{field: "user.id", setter: fld_prio, prio: 2}]}, + "c_user_name": {to:[{field: "related.user", setter: fld_append},{field: "user.name", setter: fld_prio, prio: 8}]}, + "c_username": {to:[{field: "related.user", setter: fld_append},{field: "user.name", setter: fld_prio, prio: 2}]}, + "cctld": {to:[{field: "url.top_level_domain", setter: fld_prio, prio: 1}]}, + "child_pid": {convert: to_long, to:[{field: "process.pid", setter: fld_prio, prio: 1}]}, + "child_pid_val": {to:[{field: "process.title", setter: fld_set}]}, + "child_process": {to:[{field: "process.name", setter: fld_prio, prio: 1}]}, + "city.dst": {to:[{field: "destination.geo.city_name", setter: fld_set}]}, + "city.src": {to:[{field: "source.geo.city_name", setter: fld_set}]}, + "daddr": {convert: to_ip, to:[{field: "destination.ip", setter: fld_set},{field: "related.ip", setter: fld_append}]}, + "daddr_v6": {convert: to_ip, to:[{field: "destination.ip", setter: fld_set},{field: "related.ip", setter: fld_append}]}, + "ddomain": {to:[{field: "destination.domain", setter: fld_prio, prio: 0}]}, + "devicehostip": {convert: to_ip, to:[{field: "host.ip", setter: fld_prio, prio: 2},{field: "related.ip", setter: fld_append}]}, + "devicehostmac": {convert: to_mac, to:[{field: "host.mac", setter: fld_prio, prio: 0}]}, + "dhost": {to:[{field: "destination.address", setter: fld_set},{field: "related.hosts", setter: fld_append}]}, + "dinterface": {to:[{field: "observer.egress.interface.name", setter: fld_set}]}, + "direction": {to:[{field: "network.direction", setter: fld_set}]}, + "directory": {to:[{field: "file.directory", setter: 
fld_set}]}, + "dmacaddr": {convert: to_mac, to:[{field: "destination.mac", setter: fld_set}]}, + "dns.responsetype": {to:[{field: "dns.answers.type", setter: fld_set}]}, + "dns.resptext": {to:[{field: "dns.answers.name", setter: fld_set}]}, + "dns_querytype": {to:[{field: "dns.question.type", setter: fld_set}]}, + "domain": {to:[{field: "server.domain", setter: fld_prio, prio: 0},{field: "related.hosts", setter: fld_append}]}, + "domain.dst": {to:[{field: "destination.domain", setter: fld_prio, prio: 1}]}, + "domain.src": {to:[{field: "source.domain", setter: fld_prio, prio: 2}]}, + "domain_id": {to:[{field: "user.domain", setter: fld_set}]}, + "domainname": {to:[{field: "server.domain", setter: fld_prio, prio: 1}]}, + "dport": {convert: to_long, to:[{field: "destination.port", setter: fld_prio, prio: 0}]}, + "dtransaddr": {convert: to_ip, to:[{field: "destination.nat.ip", setter: fld_prio, prio: 0},{field: "related.ip", setter: fld_append}]}, + "dtransport": {convert: to_long, to:[{field: "destination.nat.port", setter: fld_prio, prio: 0}]}, + "ec_outcome": {to:[{field: "event.outcome", setter: fld_ecs_outcome}]}, + "event_description": {to:[{field: "message", setter: fld_prio, prio: 0}]}, + "event_source": {to:[{field: "related.hosts", setter: fld_append}]}, + "event_time": {convert: to_date, to:[{field: "@timestamp", setter: fld_set}]}, + "event_type": {to:[{field: "event.action", setter: fld_prio, prio: 1}]}, + "extension": {to:[{field: "file.extension", setter: fld_prio, prio: 1}]}, + "file.attributes": {to:[{field: "file.attributes", setter: fld_set}]}, + "filename": {to:[{field: "file.name", setter: fld_prio, prio: 0}]}, + "filename_size": {convert: to_long, to:[{field: "file.size", setter: fld_set}]}, + "filepath": {to:[{field: "file.path", setter: fld_set}]}, + "filetype": {to:[{field: "file.type", setter: fld_set}]}, + "fqdn": {to:[{field: "related.hosts", setter: fld_append}]}, + "group": {to:[{field: "group.name", setter: fld_set}]}, + "groupid": {to:[{field: "group.id", setter: fld_set}]}, + "host": {to:[{field: "host.name", setter: fld_prio, prio: 1},{field: "related.hosts", setter: fld_append}]}, + "hostip": {convert: to_ip, to:[{field: "host.ip", setter: fld_prio, prio: 0},{field: "related.ip", setter: fld_append}]}, + "hostip_v6": {convert: to_ip, to:[{field: "host.ip", setter: fld_prio, prio: 1},{field: "related.ip", setter: fld_append}]}, + "hostname": {to:[{field: "host.name", setter: fld_prio, prio: 0}]}, + "id": {to:[{field: "event.code", setter: fld_prio, prio: 0}]}, + "interface": {to:[{field: "network.interface.name", setter: fld_set}]}, + "ip.orig": {convert: to_ip, to:[{field: "network.forwarded_ip", setter: fld_prio, prio: 0},{field: "related.ip", setter: fld_append}]}, + "ip.trans.dst": {convert: to_ip, to:[{field: "destination.nat.ip", setter: fld_prio, prio: 1},{field: "related.ip", setter: fld_append}]}, + "ip.trans.src": {convert: to_ip, to:[{field: "source.nat.ip", setter: fld_prio, prio: 1},{field: "related.ip", setter: fld_append}]}, + "ipv6.orig": {convert: to_ip, to:[{field: "network.forwarded_ip", setter: fld_prio, prio: 2},{field: "related.ip", setter: fld_append}]}, + "latdec_dst": {convert: to_double, to:[{field: "destination.geo.location.lat", setter: fld_set}]}, + "latdec_src": {convert: to_double, to:[{field: "source.geo.location.lat", setter: fld_set}]}, + "location_city": {to:[{field: "geo.city_name", setter: fld_set}]}, + "location_country": {to:[{field: "geo.country_name", setter: fld_set}]}, + "location_desc": {to:[{field: "geo.name", 
setter: fld_set}]}, + "location_dst": {to:[{field: "destination.geo.country_name", setter: fld_set}]}, + "location_src": {to:[{field: "source.geo.country_name", setter: fld_set}]}, + "location_state": {to:[{field: "geo.region_name", setter: fld_set}]}, + "logon_id": {to:[{field: "related.user", setter: fld_append},{field: "user.name", setter: fld_prio, prio: 5}]}, + "longdec_dst": {convert: to_double, to:[{field: "destination.geo.location.lon", setter: fld_set}]}, + "longdec_src": {convert: to_double, to:[{field: "source.geo.location.lon", setter: fld_set}]}, + "macaddr": {convert: to_mac, to:[{field: "host.mac", setter: fld_prio, prio: 2}]}, + "messageid": {to:[{field: "event.code", setter: fld_prio, prio: 1}]}, + "method": {to:[{field: "http.request.method", setter: fld_set}]}, + "msg": {to:[{field: "message", setter: fld_set}]}, + "orig_ip": {convert: to_ip, to:[{field: "network.forwarded_ip", setter: fld_prio, prio: 1},{field: "related.ip", setter: fld_append}]}, + "owner": {to:[{field: "related.user", setter: fld_append},{field: "user.name", setter: fld_prio, prio: 6}]}, + "packets": {convert: to_long, to:[{field: "network.packets", setter: fld_set}]}, + "parent_pid": {convert: to_long, to:[{field: "process.parent.pid", setter: fld_prio, prio: 0}]}, + "parent_pid_val": {to:[{field: "process.parent.title", setter: fld_set}]}, + "parent_process": {to:[{field: "process.parent.name", setter: fld_prio, prio: 0}]}, + "patient_fullname": {to:[{field: "user.full_name", setter: fld_prio, prio: 1}]}, + "port.dst": {convert: to_long, to:[{field: "destination.port", setter: fld_prio, prio: 1}]}, + "port.src": {convert: to_long, to:[{field: "source.port", setter: fld_prio, prio: 1}]}, + "port.trans.dst": {convert: to_long, to:[{field: "destination.nat.port", setter: fld_prio, prio: 1}]}, + "port.trans.src": {convert: to_long, to:[{field: "source.nat.port", setter: fld_prio, prio: 1}]}, + "process": {to:[{field: "process.name", setter: fld_prio, prio: 0}]}, + "process_id": {convert: to_long, to:[{field: "process.pid", setter: fld_prio, prio: 0}]}, + "process_id_src": {convert: to_long, to:[{field: "process.parent.pid", setter: fld_prio, prio: 1}]}, + "process_src": {to:[{field: "process.parent.name", setter: fld_prio, prio: 1}]}, + "product": {to:[{field: "observer.product", setter: fld_set}]}, + "protocol": {to:[{field: "network.protocol", setter: fld_set}]}, + "query": {to:[{field: "url.query", setter: fld_prio, prio: 2}]}, + "rbytes": {convert: to_long, to:[{field: "destination.bytes", setter: fld_set}]}, + "referer": {to:[{field: "http.request.referrer", setter: fld_prio, prio: 1}]}, + "rulename": {to:[{field: "rule.name", setter: fld_set}]}, + "saddr": {convert: to_ip, to:[{field: "source.ip", setter: fld_set},{field: "related.ip", setter: fld_append}]}, + "saddr_v6": {convert: to_ip, to:[{field: "source.ip", setter: fld_set},{field: "related.ip", setter: fld_append}]}, + "sbytes": {convert: to_long, to:[{field: "source.bytes", setter: fld_set}]}, + "sdomain": {to:[{field: "source.domain", setter: fld_prio, prio: 0}]}, + "service": {to:[{field: "service.name", setter: fld_prio, prio: 1}]}, + "service.name": {to:[{field: "service.name", setter: fld_prio, prio: 0}]}, + "service_account": {to:[{field: "related.user", setter: fld_append},{field: "user.name", setter: fld_prio, prio: 7}]}, + "severity": {to:[{field: "log.level", setter: fld_set}]}, + "shost": {to:[{field: "host.hostname", setter: fld_set},{field: "source.address", setter: fld_set},{field: "related.hosts", setter: fld_append}]}, + 
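// Editor's note (illustrative example, not part of the original table): each + // entry maps a captured nwparser field onto one or more ECS fields through an + // optional converter and a setter. Given a captured "sport" of "8080", to_long + // yields 8080 and fld_prio stores it on source.port with prio 0, so later + // captures of "port.src" (prio 1) or "tcp.srcport" (prio 2) cannot displace + // it: the lowest prio wins regardless of capture order. +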
"sinterface": {to:[{field: "observer.ingress.interface.name", setter: fld_set}]}, + "sld": {to:[{field: "url.registered_domain", setter: fld_set}]}, + "smacaddr": {convert: to_mac, to:[{field: "source.mac", setter: fld_set}]}, + "sport": {convert: to_long, to:[{field: "source.port", setter: fld_prio, prio: 0}]}, + "stransaddr": {convert: to_ip, to:[{field: "source.nat.ip", setter: fld_prio, prio: 0},{field: "related.ip", setter: fld_append}]}, + "stransport": {convert: to_long, to:[{field: "source.nat.port", setter: fld_prio, prio: 0}]}, + "tcp.dstport": {convert: to_long, to:[{field: "destination.port", setter: fld_prio, prio: 2}]}, + "tcp.srcport": {convert: to_long, to:[{field: "source.port", setter: fld_prio, prio: 2}]}, + "timezone": {to:[{field: "event.timezone", setter: fld_set}]}, + "tld": {to:[{field: "url.top_level_domain", setter: fld_prio, prio: 0}]}, + "udp.dstport": {convert: to_long, to:[{field: "destination.port", setter: fld_prio, prio: 3}]}, + "udp.srcport": {convert: to_long, to:[{field: "source.port", setter: fld_prio, prio: 3}]}, + "uid": {to:[{field: "related.user", setter: fld_append},{field: "user.name", setter: fld_prio, prio: 3}]}, + "url": {to:[{field: "url.original", setter: fld_prio, prio: 1}]}, + "url_raw": {to:[{field: "url.original", setter: fld_prio, prio: 0}]}, + "urldomain": {to:[{field: "url.domain", setter: fld_prio, prio: 0}]}, + "urlquery": {to:[{field: "url.query", setter: fld_prio, prio: 0}]}, + "user": {to:[{field: "related.user", setter: fld_append},{field: "user.name", setter: fld_prio, prio: 0}]}, + "user.id": {to:[{field: "user.id", setter: fld_prio, prio: 1}]}, + "user_agent": {to:[{field: "user_agent.original", setter: fld_set}]}, + "user_fullname": {to:[{field: "user.full_name", setter: fld_prio, prio: 0}]}, + "user_id": {to:[{field: "user.id", setter: fld_prio, prio: 0}]}, + "username": {to:[{field: "related.user", setter: fld_append},{field: "user.name", setter: fld_prio, prio: 1}]}, + "version": {to:[{field: "observer.version", setter: fld_set}]}, + "web_domain": {to:[{field: "url.domain", setter: fld_prio, prio: 1},{field: "related.hosts", setter: fld_append}]}, + "web_extension": {to:[{field: "file.extension", setter: fld_prio, prio: 0}]}, + "web_query": {to:[{field: "url.query", setter: fld_prio, prio: 1}]}, + "web_ref_domain": {to:[{field: "related.hosts", setter: fld_append}]}, + "web_referer": {to:[{field: "http.request.referrer", setter: fld_prio, prio: 0}]}, + "web_root": {to:[{field: "url.path", setter: fld_set}]}, + "webpage": {to:[{field: "file.name", setter: fld_prio, prio: 1}]}, + }; + + var rsa_mappings = { + "access_point": {to:[{field: "rsa.wireless.access_point", setter: fld_set}]}, + "accesses": {to:[{field: "rsa.identity.accesses", setter: fld_set}]}, + "acl_id": {to:[{field: "rsa.misc.acl_id", setter: fld_set}]}, + "acl_op": {to:[{field: "rsa.misc.acl_op", setter: fld_set}]}, + "acl_pos": {to:[{field: "rsa.misc.acl_pos", setter: fld_set}]}, + "acl_table": {to:[{field: "rsa.misc.acl_table", setter: fld_set}]}, + "action": {to:[{field: "rsa.misc.action", setter: fld_append}]}, + "ad_computer_dst": {to:[{field: "rsa.network.ad_computer_dst", setter: fld_set}]}, + "addr": {to:[{field: "rsa.network.addr", setter: fld_set}]}, + "admin": {to:[{field: "rsa.misc.admin", setter: fld_set}]}, + "agent": {to:[{field: "rsa.misc.client", setter: fld_prio, prio: 0}]}, + "agent.id": {to:[{field: "rsa.misc.agent_id", setter: fld_set}]}, + "alarm_id": {to:[{field: "rsa.misc.alarm_id", setter: fld_set}]}, + "alarmname": {to:[{field: 
"rsa.misc.alarmname", setter: fld_set}]}, + "alert": {to:[{field: "rsa.threat.alert", setter: fld_set}]}, + "alert_id": {to:[{field: "rsa.misc.alert_id", setter: fld_set}]}, + "alias.host": {to:[{field: "rsa.network.alias_host", setter: fld_append}]}, + "analysis.file": {to:[{field: "rsa.investigations.analysis_file", setter: fld_set}]}, + "analysis.service": {to:[{field: "rsa.investigations.analysis_service", setter: fld_set}]}, + "analysis.session": {to:[{field: "rsa.investigations.analysis_session", setter: fld_set}]}, + "app_id": {to:[{field: "rsa.misc.app_id", setter: fld_set}]}, + "attachment": {to:[{field: "rsa.file.attachment", setter: fld_set}]}, + "audit": {to:[{field: "rsa.misc.audit", setter: fld_set}]}, + "audit_class": {to:[{field: "rsa.internal.audit_class", setter: fld_set}]}, + "audit_object": {to:[{field: "rsa.misc.audit_object", setter: fld_set}]}, + "auditdata": {to:[{field: "rsa.misc.auditdata", setter: fld_set}]}, + "authmethod": {to:[{field: "rsa.identity.auth_method", setter: fld_set}]}, + "autorun_type": {to:[{field: "rsa.misc.autorun_type", setter: fld_set}]}, + "bcc": {to:[{field: "rsa.email.email", setter: fld_append}]}, + "benchmark": {to:[{field: "rsa.misc.benchmark", setter: fld_set}]}, + "binary": {to:[{field: "rsa.file.binary", setter: fld_set}]}, + "boc": {to:[{field: "rsa.investigations.boc", setter: fld_set}]}, + "bssid": {to:[{field: "rsa.wireless.wlan_ssid", setter: fld_prio, prio: 1}]}, + "bypass": {to:[{field: "rsa.misc.bypass", setter: fld_set}]}, + "c_sid": {to:[{field: "rsa.identity.user_sid_src", setter: fld_set}]}, + "cache": {to:[{field: "rsa.misc.cache", setter: fld_set}]}, + "cache_hit": {to:[{field: "rsa.misc.cache_hit", setter: fld_set}]}, + "calling_from": {to:[{field: "rsa.misc.phone", setter: fld_prio, prio: 1}]}, + "calling_to": {to:[{field: "rsa.misc.phone", setter: fld_prio, prio: 0}]}, + "category": {to:[{field: "rsa.misc.category", setter: fld_set}]}, + "cc": {to:[{field: "rsa.email.email", setter: fld_append}]}, + "cc.number": {convert: to_long, to:[{field: "rsa.misc.cc_number", setter: fld_set}]}, + "cefversion": {to:[{field: "rsa.misc.cefversion", setter: fld_set}]}, + "cert.serial": {to:[{field: "rsa.crypto.cert_serial", setter: fld_set}]}, + "cert_ca": {to:[{field: "rsa.crypto.cert_ca", setter: fld_set}]}, + "cert_checksum": {to:[{field: "rsa.crypto.cert_checksum", setter: fld_set}]}, + "cert_common": {to:[{field: "rsa.crypto.cert_common", setter: fld_set}]}, + "cert_error": {to:[{field: "rsa.crypto.cert_error", setter: fld_set}]}, + "cert_hostname": {to:[{field: "rsa.crypto.cert_host_name", setter: fld_set}]}, + "cert_hostname_cat": {to:[{field: "rsa.crypto.cert_host_cat", setter: fld_set}]}, + "cert_issuer": {to:[{field: "rsa.crypto.cert_issuer", setter: fld_set}]}, + "cert_keysize": {to:[{field: "rsa.crypto.cert_keysize", setter: fld_set}]}, + "cert_status": {to:[{field: "rsa.crypto.cert_status", setter: fld_set}]}, + "cert_subject": {to:[{field: "rsa.crypto.cert_subject", setter: fld_set}]}, + "cert_username": {to:[{field: "rsa.crypto.cert_username", setter: fld_set}]}, + "cfg.attr": {to:[{field: "rsa.misc.cfg_attr", setter: fld_set}]}, + "cfg.obj": {to:[{field: "rsa.misc.cfg_obj", setter: fld_set}]}, + "cfg.path": {to:[{field: "rsa.misc.cfg_path", setter: fld_set}]}, + "change_attribute": {to:[{field: "rsa.misc.change_attrib", setter: fld_set}]}, + "change_new": {to:[{field: "rsa.misc.change_new", setter: fld_set}]}, + "change_old": {to:[{field: "rsa.misc.change_old", setter: fld_set}]}, + "changes": {to:[{field: 
"rsa.misc.changes", setter: fld_set}]}, + "checksum": {to:[{field: "rsa.misc.checksum", setter: fld_set}]}, + "checksum.dst": {to:[{field: "rsa.misc.checksum_dst", setter: fld_set}]}, + "checksum.src": {to:[{field: "rsa.misc.checksum_src", setter: fld_set}]}, + "cid": {to:[{field: "rsa.internal.cid", setter: fld_set}]}, + "client": {to:[{field: "rsa.misc.client", setter: fld_prio, prio: 1}]}, + "client_ip": {to:[{field: "rsa.misc.client_ip", setter: fld_set}]}, + "clustermembers": {to:[{field: "rsa.misc.clustermembers", setter: fld_set}]}, + "cmd": {to:[{field: "rsa.misc.cmd", setter: fld_set}]}, + "cn_acttimeout": {to:[{field: "rsa.misc.cn_acttimeout", setter: fld_set}]}, + "cn_asn_dst": {to:[{field: "rsa.web.cn_asn_dst", setter: fld_set}]}, + "cn_asn_src": {to:[{field: "rsa.misc.cn_asn_src", setter: fld_set}]}, + "cn_bgpv4nxthop": {to:[{field: "rsa.misc.cn_bgpv4nxthop", setter: fld_set}]}, + "cn_ctr_dst_code": {to:[{field: "rsa.misc.cn_ctr_dst_code", setter: fld_set}]}, + "cn_dst_tos": {to:[{field: "rsa.misc.cn_dst_tos", setter: fld_set}]}, + "cn_dst_vlan": {to:[{field: "rsa.misc.cn_dst_vlan", setter: fld_set}]}, + "cn_engine_id": {to:[{field: "rsa.misc.cn_engine_id", setter: fld_set}]}, + "cn_engine_type": {to:[{field: "rsa.misc.cn_engine_type", setter: fld_set}]}, + "cn_f_switch": {to:[{field: "rsa.misc.cn_f_switch", setter: fld_set}]}, + "cn_flowsampid": {to:[{field: "rsa.misc.cn_flowsampid", setter: fld_set}]}, + "cn_flowsampintv": {to:[{field: "rsa.misc.cn_flowsampintv", setter: fld_set}]}, + "cn_flowsampmode": {to:[{field: "rsa.misc.cn_flowsampmode", setter: fld_set}]}, + "cn_inacttimeout": {to:[{field: "rsa.misc.cn_inacttimeout", setter: fld_set}]}, + "cn_inpermbyts": {to:[{field: "rsa.misc.cn_inpermbyts", setter: fld_set}]}, + "cn_inpermpckts": {to:[{field: "rsa.misc.cn_inpermpckts", setter: fld_set}]}, + "cn_invalid": {to:[{field: "rsa.misc.cn_invalid", setter: fld_set}]}, + "cn_ip_proto_ver": {to:[{field: "rsa.misc.cn_ip_proto_ver", setter: fld_set}]}, + "cn_ipv4_ident": {to:[{field: "rsa.misc.cn_ipv4_ident", setter: fld_set}]}, + "cn_l_switch": {to:[{field: "rsa.misc.cn_l_switch", setter: fld_set}]}, + "cn_log_did": {to:[{field: "rsa.misc.cn_log_did", setter: fld_set}]}, + "cn_log_rid": {to:[{field: "rsa.misc.cn_log_rid", setter: fld_set}]}, + "cn_max_ttl": {to:[{field: "rsa.misc.cn_max_ttl", setter: fld_set}]}, + "cn_maxpcktlen": {to:[{field: "rsa.misc.cn_maxpcktlen", setter: fld_set}]}, + "cn_min_ttl": {to:[{field: "rsa.misc.cn_min_ttl", setter: fld_set}]}, + "cn_minpcktlen": {to:[{field: "rsa.misc.cn_minpcktlen", setter: fld_set}]}, + "cn_mpls_lbl_1": {to:[{field: "rsa.misc.cn_mpls_lbl_1", setter: fld_set}]}, + "cn_mpls_lbl_10": {to:[{field: "rsa.misc.cn_mpls_lbl_10", setter: fld_set}]}, + "cn_mpls_lbl_2": {to:[{field: "rsa.misc.cn_mpls_lbl_2", setter: fld_set}]}, + "cn_mpls_lbl_3": {to:[{field: "rsa.misc.cn_mpls_lbl_3", setter: fld_set}]}, + "cn_mpls_lbl_4": {to:[{field: "rsa.misc.cn_mpls_lbl_4", setter: fld_set}]}, + "cn_mpls_lbl_5": {to:[{field: "rsa.misc.cn_mpls_lbl_5", setter: fld_set}]}, + "cn_mpls_lbl_6": {to:[{field: "rsa.misc.cn_mpls_lbl_6", setter: fld_set}]}, + "cn_mpls_lbl_7": {to:[{field: "rsa.misc.cn_mpls_lbl_7", setter: fld_set}]}, + "cn_mpls_lbl_8": {to:[{field: "rsa.misc.cn_mpls_lbl_8", setter: fld_set}]}, + "cn_mpls_lbl_9": {to:[{field: "rsa.misc.cn_mpls_lbl_9", setter: fld_set}]}, + "cn_mplstoplabel": {to:[{field: "rsa.misc.cn_mplstoplabel", setter: fld_set}]}, + "cn_mplstoplabip": {to:[{field: "rsa.misc.cn_mplstoplabip", setter: fld_set}]}, + 
"cn_mul_dst_byt": {to:[{field: "rsa.misc.cn_mul_dst_byt", setter: fld_set}]}, + "cn_mul_dst_pks": {to:[{field: "rsa.misc.cn_mul_dst_pks", setter: fld_set}]}, + "cn_muligmptype": {to:[{field: "rsa.misc.cn_muligmptype", setter: fld_set}]}, + "cn_rpackets": {to:[{field: "rsa.web.cn_rpackets", setter: fld_set}]}, + "cn_sampalgo": {to:[{field: "rsa.misc.cn_sampalgo", setter: fld_set}]}, + "cn_sampint": {to:[{field: "rsa.misc.cn_sampint", setter: fld_set}]}, + "cn_seqctr": {to:[{field: "rsa.misc.cn_seqctr", setter: fld_set}]}, + "cn_spackets": {to:[{field: "rsa.misc.cn_spackets", setter: fld_set}]}, + "cn_src_tos": {to:[{field: "rsa.misc.cn_src_tos", setter: fld_set}]}, + "cn_src_vlan": {to:[{field: "rsa.misc.cn_src_vlan", setter: fld_set}]}, + "cn_sysuptime": {to:[{field: "rsa.misc.cn_sysuptime", setter: fld_set}]}, + "cn_template_id": {to:[{field: "rsa.misc.cn_template_id", setter: fld_set}]}, + "cn_totbytsexp": {to:[{field: "rsa.misc.cn_totbytsexp", setter: fld_set}]}, + "cn_totflowexp": {to:[{field: "rsa.misc.cn_totflowexp", setter: fld_set}]}, + "cn_totpcktsexp": {to:[{field: "rsa.misc.cn_totpcktsexp", setter: fld_set}]}, + "cn_unixnanosecs": {to:[{field: "rsa.misc.cn_unixnanosecs", setter: fld_set}]}, + "cn_v6flowlabel": {to:[{field: "rsa.misc.cn_v6flowlabel", setter: fld_set}]}, + "cn_v6optheaders": {to:[{field: "rsa.misc.cn_v6optheaders", setter: fld_set}]}, + "code": {to:[{field: "rsa.misc.code", setter: fld_set}]}, + "command": {to:[{field: "rsa.misc.command", setter: fld_set}]}, + "comments": {to:[{field: "rsa.misc.comments", setter: fld_set}]}, + "comp_class": {to:[{field: "rsa.misc.comp_class", setter: fld_set}]}, + "comp_name": {to:[{field: "rsa.misc.comp_name", setter: fld_set}]}, + "comp_rbytes": {to:[{field: "rsa.misc.comp_rbytes", setter: fld_set}]}, + "comp_sbytes": {to:[{field: "rsa.misc.comp_sbytes", setter: fld_set}]}, + "component_version": {to:[{field: "rsa.misc.comp_version", setter: fld_set}]}, + "connection_id": {to:[{field: "rsa.misc.connection_id", setter: fld_prio, prio: 1}]}, + "connectionid": {to:[{field: "rsa.misc.connection_id", setter: fld_prio, prio: 0}]}, + "content": {to:[{field: "rsa.misc.content", setter: fld_set}]}, + "content_type": {to:[{field: "rsa.misc.content_type", setter: fld_set}]}, + "content_version": {to:[{field: "rsa.misc.content_version", setter: fld_set}]}, + "context": {to:[{field: "rsa.misc.context", setter: fld_set}]}, + "count": {to:[{field: "rsa.misc.count", setter: fld_set}]}, + "cpu": {convert: to_long, to:[{field: "rsa.misc.cpu", setter: fld_set}]}, + "cpu_data": {to:[{field: "rsa.misc.cpu_data", setter: fld_set}]}, + "criticality": {to:[{field: "rsa.misc.criticality", setter: fld_set}]}, + "cs_agency_dst": {to:[{field: "rsa.misc.cs_agency_dst", setter: fld_set}]}, + "cs_analyzedby": {to:[{field: "rsa.misc.cs_analyzedby", setter: fld_set}]}, + "cs_av_other": {to:[{field: "rsa.misc.cs_av_other", setter: fld_set}]}, + "cs_av_primary": {to:[{field: "rsa.misc.cs_av_primary", setter: fld_set}]}, + "cs_av_secondary": {to:[{field: "rsa.misc.cs_av_secondary", setter: fld_set}]}, + "cs_bgpv6nxthop": {to:[{field: "rsa.misc.cs_bgpv6nxthop", setter: fld_set}]}, + "cs_bit9status": {to:[{field: "rsa.misc.cs_bit9status", setter: fld_set}]}, + "cs_context": {to:[{field: "rsa.misc.cs_context", setter: fld_set}]}, + "cs_control": {to:[{field: "rsa.misc.cs_control", setter: fld_set}]}, + "cs_data": {to:[{field: "rsa.misc.cs_data", setter: fld_set}]}, + "cs_datecret": {to:[{field: "rsa.misc.cs_datecret", setter: fld_set}]}, + "cs_dst_tld": {to:[{field: 
"rsa.misc.cs_dst_tld", setter: fld_set}]}, + "cs_eth_dst_ven": {to:[{field: "rsa.misc.cs_eth_dst_ven", setter: fld_set}]}, + "cs_eth_src_ven": {to:[{field: "rsa.misc.cs_eth_src_ven", setter: fld_set}]}, + "cs_event_uuid": {to:[{field: "rsa.misc.cs_event_uuid", setter: fld_set}]}, + "cs_filetype": {to:[{field: "rsa.misc.cs_filetype", setter: fld_set}]}, + "cs_fld": {to:[{field: "rsa.misc.cs_fld", setter: fld_set}]}, + "cs_if_desc": {to:[{field: "rsa.misc.cs_if_desc", setter: fld_set}]}, + "cs_if_name": {to:[{field: "rsa.misc.cs_if_name", setter: fld_set}]}, + "cs_ip_next_hop": {to:[{field: "rsa.misc.cs_ip_next_hop", setter: fld_set}]}, + "cs_ipv4dstpre": {to:[{field: "rsa.misc.cs_ipv4dstpre", setter: fld_set}]}, + "cs_ipv4srcpre": {to:[{field: "rsa.misc.cs_ipv4srcpre", setter: fld_set}]}, + "cs_lifetime": {to:[{field: "rsa.misc.cs_lifetime", setter: fld_set}]}, + "cs_log_medium": {to:[{field: "rsa.misc.cs_log_medium", setter: fld_set}]}, + "cs_loginname": {to:[{field: "rsa.misc.cs_loginname", setter: fld_set}]}, + "cs_modulescore": {to:[{field: "rsa.misc.cs_modulescore", setter: fld_set}]}, + "cs_modulesign": {to:[{field: "rsa.misc.cs_modulesign", setter: fld_set}]}, + "cs_opswatresult": {to:[{field: "rsa.misc.cs_opswatresult", setter: fld_set}]}, + "cs_payload": {to:[{field: "rsa.misc.cs_payload", setter: fld_set}]}, + "cs_registrant": {to:[{field: "rsa.misc.cs_registrant", setter: fld_set}]}, + "cs_registrar": {to:[{field: "rsa.misc.cs_registrar", setter: fld_set}]}, + "cs_represult": {to:[{field: "rsa.misc.cs_represult", setter: fld_set}]}, + "cs_rpayload": {to:[{field: "rsa.misc.cs_rpayload", setter: fld_set}]}, + "cs_sampler_name": {to:[{field: "rsa.misc.cs_sampler_name", setter: fld_set}]}, + "cs_sourcemodule": {to:[{field: "rsa.misc.cs_sourcemodule", setter: fld_set}]}, + "cs_streams": {to:[{field: "rsa.misc.cs_streams", setter: fld_set}]}, + "cs_targetmodule": {to:[{field: "rsa.misc.cs_targetmodule", setter: fld_set}]}, + "cs_v6nxthop": {to:[{field: "rsa.misc.cs_v6nxthop", setter: fld_set}]}, + "cs_whois_server": {to:[{field: "rsa.misc.cs_whois_server", setter: fld_set}]}, + "cs_yararesult": {to:[{field: "rsa.misc.cs_yararesult", setter: fld_set}]}, + "cve": {to:[{field: "rsa.misc.cve", setter: fld_set}]}, + "d_certauth": {to:[{field: "rsa.crypto.d_certauth", setter: fld_set}]}, + "d_cipher": {to:[{field: "rsa.crypto.cipher_dst", setter: fld_set}]}, + "d_ciphersize": {convert: to_long, to:[{field: "rsa.crypto.cipher_size_dst", setter: fld_set}]}, + "d_sslver": {to:[{field: "rsa.crypto.ssl_ver_dst", setter: fld_set}]}, + "data": {to:[{field: "rsa.internal.data", setter: fld_set}]}, + "data_type": {to:[{field: "rsa.misc.data_type", setter: fld_set}]}, + "date": {to:[{field: "rsa.time.date", setter: fld_set}]}, + "datetime": {to:[{field: "rsa.time.datetime", setter: fld_set}]}, + "day": {to:[{field: "rsa.time.day", setter: fld_set}]}, + "db_id": {to:[{field: "rsa.db.db_id", setter: fld_set}]}, + "db_name": {to:[{field: "rsa.db.database", setter: fld_set}]}, + "db_pid": {convert: to_long, to:[{field: "rsa.db.db_pid", setter: fld_set}]}, + "dclass_counter1": {convert: to_long, to:[{field: "rsa.counters.dclass_c1", setter: fld_set}]}, + "dclass_counter1_string": {to:[{field: "rsa.counters.dclass_c1_str", setter: fld_set}]}, + "dclass_counter2": {convert: to_long, to:[{field: "rsa.counters.dclass_c2", setter: fld_set}]}, + "dclass_counter2_string": {to:[{field: "rsa.counters.dclass_c2_str", setter: fld_set}]}, + "dclass_counter3": {convert: to_long, to:[{field: "rsa.counters.dclass_c3", 
setter: fld_set}]}, + "dclass_counter3_string": {to:[{field: "rsa.counters.dclass_c3_str", setter: fld_set}]}, + "dclass_ratio1": {to:[{field: "rsa.counters.dclass_r1", setter: fld_set}]}, + "dclass_ratio1_string": {to:[{field: "rsa.counters.dclass_r1_str", setter: fld_set}]}, + "dclass_ratio2": {to:[{field: "rsa.counters.dclass_r2", setter: fld_set}]}, + "dclass_ratio2_string": {to:[{field: "rsa.counters.dclass_r2_str", setter: fld_set}]}, + "dclass_ratio3": {to:[{field: "rsa.counters.dclass_r3", setter: fld_set}]}, + "dclass_ratio3_string": {to:[{field: "rsa.counters.dclass_r3_str", setter: fld_set}]}, + "dead": {convert: to_long, to:[{field: "rsa.internal.dead", setter: fld_set}]}, + "description": {to:[{field: "rsa.misc.description", setter: fld_set}]}, + "detail": {to:[{field: "rsa.misc.event_desc", setter: fld_set}]}, + "device": {to:[{field: "rsa.misc.device_name", setter: fld_set}]}, + "device.class": {to:[{field: "rsa.internal.device_class", setter: fld_set}]}, + "device.group": {to:[{field: "rsa.internal.device_group", setter: fld_set}]}, + "device.host": {to:[{field: "rsa.internal.device_host", setter: fld_set}]}, + "device.ip": {convert: to_ip, to:[{field: "rsa.internal.device_ip", setter: fld_set}]}, + "device.ipv6": {convert: to_ip, to:[{field: "rsa.internal.device_ipv6", setter: fld_set}]}, + "device.type": {to:[{field: "rsa.internal.device_type", setter: fld_set}]}, + "device.type.id": {convert: to_long, to:[{field: "rsa.internal.device_type_id", setter: fld_set}]}, + "devicehostname": {to:[{field: "rsa.network.alias_host", setter: fld_append}]}, + "devvendor": {to:[{field: "rsa.misc.devvendor", setter: fld_set}]}, + "dhost": {to:[{field: "rsa.network.host_dst", setter: fld_set}]}, + "did": {to:[{field: "rsa.internal.did", setter: fld_set}]}, + "dinterface": {to:[{field: "rsa.network.dinterface", setter: fld_set}]}, + "directory.dst": {to:[{field: "rsa.file.directory_dst", setter: fld_set}]}, + "directory.src": {to:[{field: "rsa.file.directory_src", setter: fld_set}]}, + "disk_volume": {to:[{field: "rsa.storage.disk_volume", setter: fld_set}]}, + "disposition": {to:[{field: "rsa.misc.disposition", setter: fld_set}]}, + "distance": {to:[{field: "rsa.misc.distance", setter: fld_set}]}, + "dmask": {to:[{field: "rsa.network.dmask", setter: fld_set}]}, + "dn": {to:[{field: "rsa.identity.dn", setter: fld_set}]}, + "dns_a_record": {to:[{field: "rsa.network.dns_a_record", setter: fld_set}]}, + "dns_cname_record": {to:[{field: "rsa.network.dns_cname_record", setter: fld_set}]}, + "dns_id": {to:[{field: "rsa.network.dns_id", setter: fld_set}]}, + "dns_opcode": {to:[{field: "rsa.network.dns_opcode", setter: fld_set}]}, + "dns_ptr_record": {to:[{field: "rsa.network.dns_ptr_record", setter: fld_set}]}, + "dns_resp": {to:[{field: "rsa.network.dns_resp", setter: fld_set}]}, + "dns_type": {to:[{field: "rsa.network.dns_type", setter: fld_set}]}, + "doc_number": {convert: to_long, to:[{field: "rsa.misc.doc_number", setter: fld_set}]}, + "domain": {to:[{field: "rsa.network.domain", setter: fld_set}]}, + "domain1": {to:[{field: "rsa.network.domain1", setter: fld_set}]}, + "dst_dn": {to:[{field: "rsa.identity.dn_dst", setter: fld_set}]}, + "dst_payload": {to:[{field: "rsa.misc.payload_dst", setter: fld_set}]}, + "dst_spi": {to:[{field: "rsa.misc.spi_dst", setter: fld_set}]}, + "dst_zone": {to:[{field: "rsa.network.zone_dst", setter: fld_set}]}, + "dstburb": {to:[{field: "rsa.misc.dstburb", setter: fld_set}]}, + "duration": {convert: to_double, to:[{field: "rsa.time.duration_time", setter: 
fld_set}]}, + "duration_string": {to:[{field: "rsa.time.duration_str", setter: fld_set}]}, + "ec_activity": {to:[{field: "rsa.investigations.ec_activity", setter: fld_set}]}, + "ec_outcome": {to:[{field: "rsa.investigations.ec_outcome", setter: fld_set}]}, + "ec_subject": {to:[{field: "rsa.investigations.ec_subject", setter: fld_set}]}, + "ec_theme": {to:[{field: "rsa.investigations.ec_theme", setter: fld_set}]}, + "edomain": {to:[{field: "rsa.misc.edomain", setter: fld_set}]}, + "edomaub": {to:[{field: "rsa.misc.edomaub", setter: fld_set}]}, + "effective_time": {convert: to_date, to:[{field: "rsa.time.effective_time", setter: fld_set}]}, + "ein.number": {convert: to_long, to:[{field: "rsa.misc.ein_number", setter: fld_set}]}, + "email": {to:[{field: "rsa.email.email", setter: fld_append}]}, + "encryption_type": {to:[{field: "rsa.crypto.crypto", setter: fld_set}]}, + "endtime": {convert: to_date, to:[{field: "rsa.time.endtime", setter: fld_set}]}, + "entropy.req": {convert: to_long, to:[{field: "rsa.internal.entropy_req", setter: fld_set}]}, + "entropy.res": {convert: to_long, to:[{field: "rsa.internal.entropy_res", setter: fld_set}]}, + "entry": {to:[{field: "rsa.internal.entry", setter: fld_set}]}, + "eoc": {to:[{field: "rsa.investigations.eoc", setter: fld_set}]}, + "error": {to:[{field: "rsa.misc.error", setter: fld_set}]}, + "eth_type": {convert: to_long, to:[{field: "rsa.network.eth_type", setter: fld_set}]}, + "euid": {to:[{field: "rsa.misc.euid", setter: fld_set}]}, + "event.cat": {convert: to_long, to:[{field: "rsa.investigations.event_cat", setter: fld_prio, prio: 1}]}, + "event.cat.name": {to:[{field: "rsa.investigations.event_cat_name", setter: fld_prio, prio: 1}]}, + "event_cat": {convert: to_long, to:[{field: "rsa.investigations.event_cat", setter: fld_prio, prio: 0}]}, + "event_cat_name": {to:[{field: "rsa.investigations.event_cat_name", setter: fld_prio, prio: 0}]}, + "event_category": {to:[{field: "rsa.misc.event_category", setter: fld_set}]}, + "event_computer": {to:[{field: "rsa.misc.event_computer", setter: fld_set}]}, + "event_counter": {convert: to_long, to:[{field: "rsa.counters.event_counter", setter: fld_set}]}, + "event_description": {to:[{field: "rsa.internal.event_desc", setter: fld_set}]}, + "event_id": {to:[{field: "rsa.misc.event_id", setter: fld_set}]}, + "event_log": {to:[{field: "rsa.misc.event_log", setter: fld_set}]}, + "event_name": {to:[{field: "rsa.internal.event_name", setter: fld_set}]}, + "event_queue_time": {convert: to_date, to:[{field: "rsa.time.event_queue_time", setter: fld_set}]}, + "event_source": {to:[{field: "rsa.misc.event_source", setter: fld_set}]}, + "event_state": {to:[{field: "rsa.misc.event_state", setter: fld_set}]}, + "event_time": {convert: to_date, to:[{field: "rsa.time.event_time", setter: fld_set}]}, + "event_time_str": {to:[{field: "rsa.time.event_time_str", setter: fld_prio, prio: 1}]}, + "event_time_string": {to:[{field: "rsa.time.event_time_str", setter: fld_prio, prio: 0}]}, + "event_type": {to:[{field: "rsa.misc.event_type", setter: fld_set}]}, + "event_user": {to:[{field: "rsa.misc.event_user", setter: fld_set}]}, + "eventtime": {to:[{field: "rsa.time.eventtime", setter: fld_set}]}, + "expected_val": {to:[{field: "rsa.misc.expected_val", setter: fld_set}]}, + "expiration_time": {convert: to_date, to:[{field: "rsa.time.expire_time", setter: fld_set}]}, + "expiration_time_string": {to:[{field: "rsa.time.expire_time_str", setter: fld_set}]}, + "facility": {to:[{field: "rsa.misc.facility", setter: fld_set}]}, + 
"facilityname": {to:[{field: "rsa.misc.facilityname", setter: fld_set}]}, + "faddr": {to:[{field: "rsa.network.faddr", setter: fld_set}]}, + "fcatnum": {to:[{field: "rsa.misc.fcatnum", setter: fld_set}]}, + "federated_idp": {to:[{field: "rsa.identity.federated_idp", setter: fld_set}]}, + "federated_sp": {to:[{field: "rsa.identity.federated_sp", setter: fld_set}]}, + "feed.category": {to:[{field: "rsa.internal.feed_category", setter: fld_set}]}, + "feed_desc": {to:[{field: "rsa.internal.feed_desc", setter: fld_set}]}, + "feed_name": {to:[{field: "rsa.internal.feed_name", setter: fld_set}]}, + "fhost": {to:[{field: "rsa.network.fhost", setter: fld_set}]}, + "file_entropy": {convert: to_double, to:[{field: "rsa.file.file_entropy", setter: fld_set}]}, + "file_vendor": {to:[{field: "rsa.file.file_vendor", setter: fld_set}]}, + "filename_dst": {to:[{field: "rsa.file.filename_dst", setter: fld_set}]}, + "filename_src": {to:[{field: "rsa.file.filename_src", setter: fld_set}]}, + "filename_tmp": {to:[{field: "rsa.file.filename_tmp", setter: fld_set}]}, + "filesystem": {to:[{field: "rsa.file.filesystem", setter: fld_set}]}, + "filter": {to:[{field: "rsa.misc.filter", setter: fld_set}]}, + "finterface": {to:[{field: "rsa.misc.finterface", setter: fld_set}]}, + "flags": {to:[{field: "rsa.misc.flags", setter: fld_set}]}, + "forensic_info": {to:[{field: "rsa.misc.forensic_info", setter: fld_set}]}, + "forward.ip": {convert: to_ip, to:[{field: "rsa.internal.forward_ip", setter: fld_set}]}, + "forward.ipv6": {convert: to_ip, to:[{field: "rsa.internal.forward_ipv6", setter: fld_set}]}, + "found": {to:[{field: "rsa.misc.found", setter: fld_set}]}, + "fport": {to:[{field: "rsa.network.fport", setter: fld_set}]}, + "fqdn": {to:[{field: "rsa.web.fqdn", setter: fld_set}]}, + "fresult": {convert: to_long, to:[{field: "rsa.misc.fresult", setter: fld_set}]}, + "from": {to:[{field: "rsa.email.email_src", setter: fld_set}]}, + "gaddr": {to:[{field: "rsa.misc.gaddr", setter: fld_set}]}, + "gateway": {to:[{field: "rsa.network.gateway", setter: fld_set}]}, + "gmtdate": {to:[{field: "rsa.time.gmtdate", setter: fld_set}]}, + "gmttime": {to:[{field: "rsa.time.gmttime", setter: fld_set}]}, + "group": {to:[{field: "rsa.misc.group", setter: fld_set}]}, + "group_object": {to:[{field: "rsa.misc.group_object", setter: fld_set}]}, + "groupid": {to:[{field: "rsa.misc.group_id", setter: fld_set}]}, + "h_code": {to:[{field: "rsa.internal.hcode", setter: fld_set}]}, + "hardware_id": {to:[{field: "rsa.misc.hardware_id", setter: fld_set}]}, + "header.id": {to:[{field: "rsa.internal.header_id", setter: fld_set}]}, + "host.orig": {to:[{field: "rsa.network.host_orig", setter: fld_set}]}, + "host.state": {to:[{field: "rsa.endpoint.host_state", setter: fld_set}]}, + "host.type": {to:[{field: "rsa.network.host_type", setter: fld_set}]}, + "host_role": {to:[{field: "rsa.identity.host_role", setter: fld_set}]}, + "hostid": {to:[{field: "rsa.network.alias_host", setter: fld_append}]}, + "hostname": {to:[{field: "rsa.network.alias_host", setter: fld_append}]}, + "hour": {to:[{field: "rsa.time.hour", setter: fld_set}]}, + "https.insact": {to:[{field: "rsa.crypto.https_insact", setter: fld_set}]}, + "https.valid": {to:[{field: "rsa.crypto.https_valid", setter: fld_set}]}, + "icmpcode": {convert: to_long, to:[{field: "rsa.network.icmp_code", setter: fld_set}]}, + "icmptype": {convert: to_long, to:[{field: "rsa.network.icmp_type", setter: fld_set}]}, + "id": {to:[{field: "rsa.misc.reference_id", setter: fld_set}]}, + "id1": {to:[{field: 
"rsa.misc.reference_id1", setter: fld_set}]}, + "id2": {to:[{field: "rsa.misc.reference_id2", setter: fld_set}]}, + "id3": {to:[{field: "rsa.misc.id3", setter: fld_set}]}, + "ike": {to:[{field: "rsa.crypto.ike", setter: fld_set}]}, + "ike_cookie1": {to:[{field: "rsa.crypto.ike_cookie1", setter: fld_set}]}, + "ike_cookie2": {to:[{field: "rsa.crypto.ike_cookie2", setter: fld_set}]}, + "im_buddyid": {to:[{field: "rsa.misc.im_buddyid", setter: fld_set}]}, + "im_buddyname": {to:[{field: "rsa.misc.im_buddyname", setter: fld_set}]}, + "im_client": {to:[{field: "rsa.misc.im_client", setter: fld_set}]}, + "im_croomid": {to:[{field: "rsa.misc.im_croomid", setter: fld_set}]}, + "im_croomtype": {to:[{field: "rsa.misc.im_croomtype", setter: fld_set}]}, + "im_members": {to:[{field: "rsa.misc.im_members", setter: fld_set}]}, + "im_userid": {to:[{field: "rsa.misc.im_userid", setter: fld_set}]}, + "im_username": {to:[{field: "rsa.misc.im_username", setter: fld_set}]}, + "index": {to:[{field: "rsa.misc.index", setter: fld_set}]}, + "info": {to:[{field: "rsa.db.index", setter: fld_set}]}, + "inode": {convert: to_long, to:[{field: "rsa.internal.inode", setter: fld_set}]}, + "inout": {to:[{field: "rsa.misc.inout", setter: fld_set}]}, + "instance": {to:[{field: "rsa.db.instance", setter: fld_set}]}, + "interface": {to:[{field: "rsa.network.interface", setter: fld_set}]}, + "inv.category": {to:[{field: "rsa.investigations.inv_category", setter: fld_set}]}, + "inv.context": {to:[{field: "rsa.investigations.inv_context", setter: fld_set}]}, + "ioc": {to:[{field: "rsa.investigations.ioc", setter: fld_set}]}, + "ip_proto": {convert: to_long, to:[{field: "rsa.network.ip_proto", setter: fld_set}]}, + "ipkt": {to:[{field: "rsa.misc.ipkt", setter: fld_set}]}, + "ipscat": {to:[{field: "rsa.misc.ipscat", setter: fld_set}]}, + "ipspri": {to:[{field: "rsa.misc.ipspri", setter: fld_set}]}, + "jobname": {to:[{field: "rsa.misc.jobname", setter: fld_set}]}, + "jobnum": {to:[{field: "rsa.misc.job_num", setter: fld_set}]}, + "laddr": {to:[{field: "rsa.network.laddr", setter: fld_set}]}, + "language": {to:[{field: "rsa.misc.language", setter: fld_set}]}, + "latitude": {to:[{field: "rsa.misc.latitude", setter: fld_set}]}, + "lc.cid": {to:[{field: "rsa.internal.lc_cid", setter: fld_set}]}, + "lc.ctime": {convert: to_date, to:[{field: "rsa.internal.lc_ctime", setter: fld_set}]}, + "ldap": {to:[{field: "rsa.identity.ldap", setter: fld_set}]}, + "ldap.query": {to:[{field: "rsa.identity.ldap_query", setter: fld_set}]}, + "ldap.response": {to:[{field: "rsa.identity.ldap_response", setter: fld_set}]}, + "level": {convert: to_long, to:[{field: "rsa.internal.level", setter: fld_set}]}, + "lhost": {to:[{field: "rsa.network.lhost", setter: fld_set}]}, + "library": {to:[{field: "rsa.misc.library", setter: fld_set}]}, + "lifetime": {convert: to_long, to:[{field: "rsa.misc.lifetime", setter: fld_set}]}, + "linenum": {to:[{field: "rsa.misc.linenum", setter: fld_set}]}, + "link": {to:[{field: "rsa.misc.link", setter: fld_set}]}, + "linterface": {to:[{field: "rsa.network.linterface", setter: fld_set}]}, + "list_name": {to:[{field: "rsa.misc.list_name", setter: fld_set}]}, + "listnum": {to:[{field: "rsa.misc.listnum", setter: fld_set}]}, + "load_data": {to:[{field: "rsa.misc.load_data", setter: fld_set}]}, + "location_floor": {to:[{field: "rsa.misc.location_floor", setter: fld_set}]}, + "location_mark": {to:[{field: "rsa.misc.location_mark", setter: fld_set}]}, + "log_id": {to:[{field: "rsa.misc.log_id", setter: fld_set}]}, + "log_type": 
{to:[{field: "rsa.misc.log_type", setter: fld_set}]}, + "logid": {to:[{field: "rsa.misc.logid", setter: fld_set}]}, + "logip": {to:[{field: "rsa.misc.logip", setter: fld_set}]}, + "logname": {to:[{field: "rsa.misc.logname", setter: fld_set}]}, + "logon_type": {to:[{field: "rsa.identity.logon_type", setter: fld_set}]}, + "logon_type_desc": {to:[{field: "rsa.identity.logon_type_desc", setter: fld_set}]}, + "longitude": {to:[{field: "rsa.misc.longitude", setter: fld_set}]}, + "lport": {to:[{field: "rsa.misc.lport", setter: fld_set}]}, + "lread": {convert: to_long, to:[{field: "rsa.db.lread", setter: fld_set}]}, + "lun": {to:[{field: "rsa.storage.lun", setter: fld_set}]}, + "lwrite": {convert: to_long, to:[{field: "rsa.db.lwrite", setter: fld_set}]}, + "macaddr": {convert: to_mac, to:[{field: "rsa.network.eth_host", setter: fld_set}]}, + "mail_id": {to:[{field: "rsa.misc.mail_id", setter: fld_set}]}, + "mask": {to:[{field: "rsa.network.mask", setter: fld_set}]}, + "match": {to:[{field: "rsa.misc.match", setter: fld_set}]}, + "mbug_data": {to:[{field: "rsa.misc.mbug_data", setter: fld_set}]}, + "mcb.req": {convert: to_long, to:[{field: "rsa.internal.mcb_req", setter: fld_set}]}, + "mcb.res": {convert: to_long, to:[{field: "rsa.internal.mcb_res", setter: fld_set}]}, + "mcbc.req": {convert: to_long, to:[{field: "rsa.internal.mcbc_req", setter: fld_set}]}, + "mcbc.res": {convert: to_long, to:[{field: "rsa.internal.mcbc_res", setter: fld_set}]}, + "medium": {convert: to_long, to:[{field: "rsa.internal.medium", setter: fld_set}]}, + "message": {to:[{field: "rsa.internal.message", setter: fld_set}]}, + "message_body": {to:[{field: "rsa.misc.message_body", setter: fld_set}]}, + "messageid": {to:[{field: "rsa.internal.messageid", setter: fld_set}]}, + "min": {to:[{field: "rsa.time.min", setter: fld_set}]}, + "misc": {to:[{field: "rsa.misc.misc", setter: fld_set}]}, + "misc_name": {to:[{field: "rsa.misc.misc_name", setter: fld_set}]}, + "mode": {to:[{field: "rsa.misc.mode", setter: fld_set}]}, + "month": {to:[{field: "rsa.time.month", setter: fld_set}]}, + "msg": {to:[{field: "rsa.internal.msg", setter: fld_set}]}, + "msgIdPart1": {to:[{field: "rsa.misc.msgIdPart1", setter: fld_set}]}, + "msgIdPart2": {to:[{field: "rsa.misc.msgIdPart2", setter: fld_set}]}, + "msgIdPart3": {to:[{field: "rsa.misc.msgIdPart3", setter: fld_set}]}, + "msgIdPart4": {to:[{field: "rsa.misc.msgIdPart4", setter: fld_set}]}, + "msg_id": {to:[{field: "rsa.internal.msg_id", setter: fld_set}]}, + "msg_type": {to:[{field: "rsa.misc.msg_type", setter: fld_set}]}, + "msgid": {to:[{field: "rsa.misc.msgid", setter: fld_set}]}, + "name": {to:[{field: "rsa.misc.name", setter: fld_set}]}, + "netname": {to:[{field: "rsa.network.netname", setter: fld_set}]}, + "netsessid": {to:[{field: "rsa.misc.netsessid", setter: fld_set}]}, + "network_port": {convert: to_long, to:[{field: "rsa.network.network_port", setter: fld_set}]}, + "network_service": {to:[{field: "rsa.network.network_service", setter: fld_set}]}, + "node": {to:[{field: "rsa.misc.node", setter: fld_set}]}, + "nodename": {to:[{field: "rsa.internal.node_name", setter: fld_set}]}, + "ntype": {to:[{field: "rsa.misc.ntype", setter: fld_set}]}, + "num": {to:[{field: "rsa.misc.num", setter: fld_set}]}, + "number": {to:[{field: "rsa.misc.number", setter: fld_set}]}, + "number1": {to:[{field: "rsa.misc.number1", setter: fld_set}]}, + "number2": {to:[{field: "rsa.misc.number2", setter: fld_set}]}, + "nwe.callback_id": {to:[{field: "rsa.internal.nwe_callback_id", setter: fld_set}]}, + "nwwn": 
{to:[{field: "rsa.misc.nwwn", setter: fld_set}]}, + "obj_id": {to:[{field: "rsa.internal.obj_id", setter: fld_set}]}, + "obj_name": {to:[{field: "rsa.misc.obj_name", setter: fld_set}]}, + "obj_server": {to:[{field: "rsa.internal.obj_server", setter: fld_set}]}, + "obj_type": {to:[{field: "rsa.misc.obj_type", setter: fld_set}]}, + "obj_value": {to:[{field: "rsa.internal.obj_val", setter: fld_set}]}, + "object": {to:[{field: "rsa.misc.object", setter: fld_set}]}, + "observed_val": {to:[{field: "rsa.misc.observed_val", setter: fld_set}]}, + "operation": {to:[{field: "rsa.misc.operation", setter: fld_set}]}, + "operation_id": {to:[{field: "rsa.misc.operation_id", setter: fld_set}]}, + "opkt": {to:[{field: "rsa.misc.opkt", setter: fld_set}]}, + "org.dst": {to:[{field: "rsa.physical.org_dst", setter: fld_prio, prio: 1}]}, + "org.src": {to:[{field: "rsa.physical.org_src", setter: fld_set}]}, + "org_dst": {to:[{field: "rsa.physical.org_dst", setter: fld_prio, prio: 0}]}, + "orig_from": {to:[{field: "rsa.misc.orig_from", setter: fld_set}]}, + "origin": {to:[{field: "rsa.network.origin", setter: fld_set}]}, + "original_owner": {to:[{field: "rsa.identity.owner", setter: fld_set}]}, + "os": {to:[{field: "rsa.misc.OS", setter: fld_set}]}, + "owner_id": {to:[{field: "rsa.misc.owner_id", setter: fld_set}]}, + "p_action": {to:[{field: "rsa.misc.p_action", setter: fld_set}]}, + "p_date": {to:[{field: "rsa.time.p_date", setter: fld_set}]}, + "p_filter": {to:[{field: "rsa.misc.p_filter", setter: fld_set}]}, + "p_group_object": {to:[{field: "rsa.misc.p_group_object", setter: fld_set}]}, + "p_id": {to:[{field: "rsa.misc.p_id", setter: fld_set}]}, + "p_month": {to:[{field: "rsa.time.p_month", setter: fld_set}]}, + "p_msgid": {to:[{field: "rsa.misc.p_msgid", setter: fld_set}]}, + "p_msgid1": {to:[{field: "rsa.misc.p_msgid1", setter: fld_set}]}, + "p_msgid2": {to:[{field: "rsa.misc.p_msgid2", setter: fld_set}]}, + "p_result1": {to:[{field: "rsa.misc.p_result1", setter: fld_set}]}, + "p_time": {to:[{field: "rsa.time.p_time", setter: fld_set}]}, + "p_time1": {to:[{field: "rsa.time.p_time1", setter: fld_set}]}, + "p_time2": {to:[{field: "rsa.time.p_time2", setter: fld_set}]}, + "p_url": {to:[{field: "rsa.web.p_url", setter: fld_set}]}, + "p_user_agent": {to:[{field: "rsa.web.p_user_agent", setter: fld_set}]}, + "p_web_cookie": {to:[{field: "rsa.web.p_web_cookie", setter: fld_set}]}, + "p_web_method": {to:[{field: "rsa.web.p_web_method", setter: fld_set}]}, + "p_web_referer": {to:[{field: "rsa.web.p_web_referer", setter: fld_set}]}, + "p_year": {to:[{field: "rsa.time.p_year", setter: fld_set}]}, + "packet_length": {to:[{field: "rsa.network.packet_length", setter: fld_set}]}, + "paddr": {convert: to_ip, to:[{field: "rsa.network.paddr", setter: fld_set}]}, + "param": {to:[{field: "rsa.misc.param", setter: fld_set}]}, + "param.dst": {to:[{field: "rsa.misc.param_dst", setter: fld_set}]}, + "param.src": {to:[{field: "rsa.misc.param_src", setter: fld_set}]}, + "parent_node": {to:[{field: "rsa.misc.parent_node", setter: fld_set}]}, + "parse.error": {to:[{field: "rsa.internal.parse_error", setter: fld_set}]}, + "password": {to:[{field: "rsa.identity.password", setter: fld_set}]}, + "password_chg": {to:[{field: "rsa.misc.password_chg", setter: fld_set}]}, + "password_expire": {to:[{field: "rsa.misc.password_expire", setter: fld_set}]}, + "patient_fname": {to:[{field: "rsa.healthcare.patient_fname", setter: fld_set}]}, + "patient_id": {to:[{field: "rsa.healthcare.patient_id", setter: fld_set}]}, + "patient_lname": {to:[{field: 
"rsa.healthcare.patient_lname", setter: fld_set}]}, + "patient_mname": {to:[{field: "rsa.healthcare.patient_mname", setter: fld_set}]}, + "payload.req": {convert: to_long, to:[{field: "rsa.internal.payload_req", setter: fld_set}]}, + "payload.res": {convert: to_long, to:[{field: "rsa.internal.payload_res", setter: fld_set}]}, + "peer": {to:[{field: "rsa.crypto.peer", setter: fld_set}]}, + "peer_id": {to:[{field: "rsa.crypto.peer_id", setter: fld_set}]}, + "permgranted": {to:[{field: "rsa.misc.permgranted", setter: fld_set}]}, + "permissions": {to:[{field: "rsa.db.permissions", setter: fld_set}]}, + "permwanted": {to:[{field: "rsa.misc.permwanted", setter: fld_set}]}, + "pgid": {to:[{field: "rsa.misc.pgid", setter: fld_set}]}, + "phone_number": {to:[{field: "rsa.misc.phone", setter: fld_prio, prio: 2}]}, + "phost": {to:[{field: "rsa.network.phost", setter: fld_set}]}, + "pid": {to:[{field: "rsa.misc.pid", setter: fld_set}]}, + "policy": {to:[{field: "rsa.misc.policy", setter: fld_set}]}, + "policyUUID": {to:[{field: "rsa.misc.policyUUID", setter: fld_set}]}, + "policy_id": {to:[{field: "rsa.misc.policy_id", setter: fld_set}]}, + "policy_value": {to:[{field: "rsa.misc.policy_value", setter: fld_set}]}, + "policy_waiver": {to:[{field: "rsa.misc.policy_waiver", setter: fld_set}]}, + "policyname": {to:[{field: "rsa.misc.policy_name", setter: fld_prio, prio: 0}]}, + "pool_id": {to:[{field: "rsa.misc.pool_id", setter: fld_set}]}, + "pool_name": {to:[{field: "rsa.misc.pool_name", setter: fld_set}]}, + "port": {convert: to_long, to:[{field: "rsa.network.port", setter: fld_set}]}, + "portname": {to:[{field: "rsa.misc.port_name", setter: fld_set}]}, + "pread": {convert: to_long, to:[{field: "rsa.db.pread", setter: fld_set}]}, + "priority": {to:[{field: "rsa.misc.priority", setter: fld_set}]}, + "privilege": {to:[{field: "rsa.file.privilege", setter: fld_set}]}, + "process.vid.dst": {to:[{field: "rsa.internal.process_vid_dst", setter: fld_set}]}, + "process.vid.src": {to:[{field: "rsa.internal.process_vid_src", setter: fld_set}]}, + "process_id_val": {to:[{field: "rsa.misc.process_id_val", setter: fld_set}]}, + "processing_time": {to:[{field: "rsa.time.process_time", setter: fld_set}]}, + "profile": {to:[{field: "rsa.identity.profile", setter: fld_set}]}, + "prog_asp_num": {to:[{field: "rsa.misc.prog_asp_num", setter: fld_set}]}, + "program": {to:[{field: "rsa.misc.program", setter: fld_set}]}, + "protocol_detail": {to:[{field: "rsa.network.protocol_detail", setter: fld_set}]}, + "pwwn": {to:[{field: "rsa.storage.pwwn", setter: fld_set}]}, + "r_hostid": {to:[{field: "rsa.network.alias_host", setter: fld_append}]}, + "real_data": {to:[{field: "rsa.misc.real_data", setter: fld_set}]}, + "realm": {to:[{field: "rsa.identity.realm", setter: fld_set}]}, + "reason": {to:[{field: "rsa.misc.reason", setter: fld_set}]}, + "rec_asp_device": {to:[{field: "rsa.misc.rec_asp_device", setter: fld_set}]}, + "rec_asp_num": {to:[{field: "rsa.misc.rec_asp_num", setter: fld_set}]}, + "rec_library": {to:[{field: "rsa.misc.rec_library", setter: fld_set}]}, + "recorded_time": {convert: to_date, to:[{field: "rsa.time.recorded_time", setter: fld_set}]}, + "recordnum": {to:[{field: "rsa.misc.recordnum", setter: fld_set}]}, + "registry.key": {to:[{field: "rsa.endpoint.registry_key", setter: fld_set}]}, + "registry.value": {to:[{field: "rsa.endpoint.registry_value", setter: fld_set}]}, + "remote_domain": {to:[{field: "rsa.web.remote_domain", setter: fld_set}]}, + "remote_domain_id": {to:[{field: "rsa.network.remote_domain_id", 
setter: fld_set}]}, + "reputation_num": {convert: to_double, to:[{field: "rsa.web.reputation_num", setter: fld_set}]}, + "resource": {to:[{field: "rsa.internal.resource", setter: fld_set}]}, + "resource_class": {to:[{field: "rsa.internal.resource_class", setter: fld_set}]}, + "result": {to:[{field: "rsa.misc.result", setter: fld_set}]}, + "result_code": {to:[{field: "rsa.misc.result_code", setter: fld_prio, prio: 1}]}, + "resultcode": {to:[{field: "rsa.misc.result_code", setter: fld_prio, prio: 0}]}, + "rid": {convert: to_long, to:[{field: "rsa.internal.rid", setter: fld_set}]}, + "risk": {to:[{field: "rsa.misc.risk", setter: fld_set}]}, + "risk_info": {to:[{field: "rsa.misc.risk_info", setter: fld_set}]}, + "risk_num": {convert: to_double, to:[{field: "rsa.misc.risk_num", setter: fld_set}]}, + "risk_num_comm": {convert: to_double, to:[{field: "rsa.misc.risk_num_comm", setter: fld_set}]}, + "risk_num_next": {convert: to_double, to:[{field: "rsa.misc.risk_num_next", setter: fld_set}]}, + "risk_num_sand": {convert: to_double, to:[{field: "rsa.misc.risk_num_sand", setter: fld_set}]}, + "risk_num_static": {convert: to_double, to:[{field: "rsa.misc.risk_num_static", setter: fld_set}]}, + "risk_suspicious": {to:[{field: "rsa.misc.risk_suspicious", setter: fld_set}]}, + "risk_warning": {to:[{field: "rsa.misc.risk_warning", setter: fld_set}]}, + "rpayload": {to:[{field: "rsa.network.rpayload", setter: fld_set}]}, + "ruid": {to:[{field: "rsa.misc.ruid", setter: fld_set}]}, + "rule": {to:[{field: "rsa.misc.rule", setter: fld_set}]}, + "rule_group": {to:[{field: "rsa.misc.rule_group", setter: fld_set}]}, + "rule_template": {to:[{field: "rsa.misc.rule_template", setter: fld_set}]}, + "rule_uid": {to:[{field: "rsa.misc.rule_uid", setter: fld_set}]}, + "rulename": {to:[{field: "rsa.misc.rule_name", setter: fld_set}]}, + "s_certauth": {to:[{field: "rsa.crypto.s_certauth", setter: fld_set}]}, + "s_cipher": {to:[{field: "rsa.crypto.cipher_src", setter: fld_set}]}, + "s_ciphersize": {convert: to_long, to:[{field: "rsa.crypto.cipher_size_src", setter: fld_set}]}, + "s_context": {to:[{field: "rsa.misc.context_subject", setter: fld_set}]}, + "s_sslver": {to:[{field: "rsa.crypto.ssl_ver_src", setter: fld_set}]}, + "sburb": {to:[{field: "rsa.misc.sburb", setter: fld_set}]}, + "scheme": {to:[{field: "rsa.crypto.scheme", setter: fld_set}]}, + "sdomain_fld": {to:[{field: "rsa.misc.sdomain_fld", setter: fld_set}]}, + "search.text": {to:[{field: "rsa.misc.search_text", setter: fld_set}]}, + "sec": {to:[{field: "rsa.misc.sec", setter: fld_set}]}, + "second": {to:[{field: "rsa.misc.second", setter: fld_set}]}, + "sensor": {to:[{field: "rsa.misc.sensor", setter: fld_set}]}, + "sensorname": {to:[{field: "rsa.misc.sensorname", setter: fld_set}]}, + "seqnum": {to:[{field: "rsa.misc.seqnum", setter: fld_set}]}, + "serial_number": {to:[{field: "rsa.misc.serial_number", setter: fld_set}]}, + "service.account": {to:[{field: "rsa.identity.service_account", setter: fld_set}]}, + "session": {to:[{field: "rsa.misc.session", setter: fld_set}]}, + "session.split": {to:[{field: "rsa.internal.session_split", setter: fld_set}]}, + "sessionid": {to:[{field: "rsa.misc.log_session_id", setter: fld_set}]}, + "sessionid1": {to:[{field: "rsa.misc.log_session_id1", setter: fld_set}]}, + "sessiontype": {to:[{field: "rsa.misc.sessiontype", setter: fld_set}]}, + "severity": {to:[{field: "rsa.misc.severity", setter: fld_set}]}, + "sid": {to:[{field: "rsa.identity.user_sid_dst", setter: fld_set}]}, + "sig.name": {to:[{field: "rsa.misc.sig_name", 
setter: fld_set}]}, + "sigUUID": {to:[{field: "rsa.misc.sigUUID", setter: fld_set}]}, + "sigcat": {to:[{field: "rsa.misc.sigcat", setter: fld_set}]}, + "sigid": {convert: to_long, to:[{field: "rsa.misc.sig_id", setter: fld_set}]}, + "sigid1": {convert: to_long, to:[{field: "rsa.misc.sig_id1", setter: fld_set}]}, + "sigid_string": {to:[{field: "rsa.misc.sig_id_str", setter: fld_set}]}, + "signame": {to:[{field: "rsa.misc.policy_name", setter: fld_prio, prio: 1}]}, + "sigtype": {to:[{field: "rsa.crypto.sig_type", setter: fld_set}]}, + "sinterface": {to:[{field: "rsa.network.sinterface", setter: fld_set}]}, + "site": {to:[{field: "rsa.internal.site", setter: fld_set}]}, + "size": {convert: to_long, to:[{field: "rsa.internal.size", setter: fld_set}]}, + "smask": {to:[{field: "rsa.network.smask", setter: fld_set}]}, + "snmp.oid": {to:[{field: "rsa.misc.snmp_oid", setter: fld_set}]}, + "snmp.value": {to:[{field: "rsa.misc.snmp_value", setter: fld_set}]}, + "sourcefile": {to:[{field: "rsa.internal.sourcefile", setter: fld_set}]}, + "space": {to:[{field: "rsa.misc.space", setter: fld_set}]}, + "space1": {to:[{field: "rsa.misc.space1", setter: fld_set}]}, + "spi": {to:[{field: "rsa.misc.spi", setter: fld_set}]}, + "sql": {to:[{field: "rsa.misc.sql", setter: fld_set}]}, + "src_dn": {to:[{field: "rsa.identity.dn_src", setter: fld_set}]}, + "src_payload": {to:[{field: "rsa.misc.payload_src", setter: fld_set}]}, + "src_spi": {to:[{field: "rsa.misc.spi_src", setter: fld_set}]}, + "src_zone": {to:[{field: "rsa.network.zone_src", setter: fld_set}]}, + "srcburb": {to:[{field: "rsa.misc.srcburb", setter: fld_set}]}, + "srcdom": {to:[{field: "rsa.misc.srcdom", setter: fld_set}]}, + "srcservice": {to:[{field: "rsa.misc.srcservice", setter: fld_set}]}, + "ssid": {to:[{field: "rsa.wireless.wlan_ssid", setter: fld_prio, prio: 0}]}, + "stamp": {convert: to_date, to:[{field: "rsa.time.stamp", setter: fld_set}]}, + "starttime": {convert: to_date, to:[{field: "rsa.time.starttime", setter: fld_set}]}, + "state": {to:[{field: "rsa.misc.state", setter: fld_set}]}, + "statement": {to:[{field: "rsa.internal.statement", setter: fld_set}]}, + "status": {to:[{field: "rsa.misc.status", setter: fld_set}]}, + "status1": {to:[{field: "rsa.misc.status1", setter: fld_set}]}, + "streams": {convert: to_long, to:[{field: "rsa.misc.streams", setter: fld_set}]}, + "subcategory": {to:[{field: "rsa.misc.subcategory", setter: fld_set}]}, + "subject": {to:[{field: "rsa.email.subject", setter: fld_set}]}, + "svcno": {to:[{field: "rsa.misc.svcno", setter: fld_set}]}, + "system": {to:[{field: "rsa.misc.system", setter: fld_set}]}, + "t_context": {to:[{field: "rsa.misc.context_target", setter: fld_set}]}, + "task_name": {to:[{field: "rsa.file.task_name", setter: fld_set}]}, + "tbdstr1": {to:[{field: "rsa.misc.tbdstr1", setter: fld_set}]}, + "tbdstr2": {to:[{field: "rsa.misc.tbdstr2", setter: fld_set}]}, + "tbl_name": {to:[{field: "rsa.db.table_name", setter: fld_set}]}, + "tcp_flags": {convert: to_long, to:[{field: "rsa.misc.tcp_flags", setter: fld_set}]}, + "terminal": {to:[{field: "rsa.misc.terminal", setter: fld_set}]}, + "tgtdom": {to:[{field: "rsa.misc.tgtdom", setter: fld_set}]}, + "tgtdomain": {to:[{field: "rsa.misc.tgtdomain", setter: fld_set}]}, + "threat_name": {to:[{field: "rsa.threat.threat_category", setter: fld_set}]}, + "threat_source": {to:[{field: "rsa.threat.threat_source", setter: fld_set}]}, + "threat_val": {to:[{field: "rsa.threat.threat_desc", setter: fld_set}]}, + "threshold": {to:[{field: "rsa.misc.threshold", setter: 
fld_set}]}, + "time": {convert: to_date, to:[{field: "rsa.internal.time", setter: fld_set}]}, + "timestamp": {to:[{field: "rsa.time.timestamp", setter: fld_set}]}, + "timezone": {to:[{field: "rsa.time.timezone", setter: fld_set}]}, + "to": {to:[{field: "rsa.email.email_dst", setter: fld_set}]}, + "tos": {convert: to_long, to:[{field: "rsa.misc.tos", setter: fld_set}]}, + "trans_from": {to:[{field: "rsa.email.trans_from", setter: fld_set}]}, + "trans_id": {to:[{field: "rsa.db.transact_id", setter: fld_set}]}, + "trans_to": {to:[{field: "rsa.email.trans_to", setter: fld_set}]}, + "trigger_desc": {to:[{field: "rsa.misc.trigger_desc", setter: fld_set}]}, + "trigger_val": {to:[{field: "rsa.misc.trigger_val", setter: fld_set}]}, + "type": {to:[{field: "rsa.misc.type", setter: fld_set}]}, + "type1": {to:[{field: "rsa.misc.type1", setter: fld_set}]}, + "tzone": {to:[{field: "rsa.time.tzone", setter: fld_set}]}, + "ubc.req": {convert: to_long, to:[{field: "rsa.internal.ubc_req", setter: fld_set}]}, + "ubc.res": {convert: to_long, to:[{field: "rsa.internal.ubc_res", setter: fld_set}]}, + "udb_class": {to:[{field: "rsa.misc.udb_class", setter: fld_set}]}, + "url_fld": {to:[{field: "rsa.misc.url_fld", setter: fld_set}]}, + "urlpage": {to:[{field: "rsa.web.urlpage", setter: fld_set}]}, + "urlroot": {to:[{field: "rsa.web.urlroot", setter: fld_set}]}, + "user_address": {to:[{field: "rsa.email.email", setter: fld_append}]}, + "user_dept": {to:[{field: "rsa.identity.user_dept", setter: fld_set}]}, + "user_div": {to:[{field: "rsa.misc.user_div", setter: fld_set}]}, + "user_fname": {to:[{field: "rsa.identity.firstname", setter: fld_set}]}, + "user_lname": {to:[{field: "rsa.identity.lastname", setter: fld_set}]}, + "user_mname": {to:[{field: "rsa.identity.middlename", setter: fld_set}]}, + "user_org": {to:[{field: "rsa.identity.org", setter: fld_set}]}, + "user_role": {to:[{field: "rsa.identity.user_role", setter: fld_set}]}, + "userid": {to:[{field: "rsa.misc.userid", setter: fld_set}]}, + "username_fld": {to:[{field: "rsa.misc.username_fld", setter: fld_set}]}, + "utcstamp": {to:[{field: "rsa.misc.utcstamp", setter: fld_set}]}, + "v_instafname": {to:[{field: "rsa.misc.v_instafname", setter: fld_set}]}, + "vendor_event_cat": {to:[{field: "rsa.investigations.event_vcat", setter: fld_set}]}, + "version": {to:[{field: "rsa.misc.version", setter: fld_set}]}, + "vid": {to:[{field: "rsa.internal.msg_vid", setter: fld_set}]}, + "virt_data": {to:[{field: "rsa.misc.virt_data", setter: fld_set}]}, + "virusname": {to:[{field: "rsa.misc.virusname", setter: fld_set}]}, + "vlan": {convert: to_long, to:[{field: "rsa.network.vlan", setter: fld_set}]}, + "vlan.name": {to:[{field: "rsa.network.vlan_name", setter: fld_set}]}, + "vm_target": {to:[{field: "rsa.misc.vm_target", setter: fld_set}]}, + "vpnid": {to:[{field: "rsa.misc.vpnid", setter: fld_set}]}, + "vsys": {to:[{field: "rsa.misc.vsys", setter: fld_set}]}, + "vuln_ref": {to:[{field: "rsa.misc.vuln_ref", setter: fld_set}]}, + "web_cookie": {to:[{field: "rsa.web.web_cookie", setter: fld_set}]}, + "web_extension_tmp": {to:[{field: "rsa.web.web_extension_tmp", setter: fld_set}]}, + "web_host": {to:[{field: "rsa.web.alias_host", setter: fld_set}]}, + "web_method": {to:[{field: "rsa.misc.action", setter: fld_append}]}, + "web_page": {to:[{field: "rsa.web.web_page", setter: fld_set}]}, + "web_ref_domain": {to:[{field: "rsa.web.web_ref_domain", setter: fld_set}]}, + "web_ref_host": {to:[{field: "rsa.network.alias_host", setter: fld_append}]}, + "web_ref_page": {to:[{field: 
"rsa.web.web_ref_page", setter: fld_set}]}, + "web_ref_query": {to:[{field: "rsa.web.web_ref_query", setter: fld_set}]}, + "web_ref_root": {to:[{field: "rsa.web.web_ref_root", setter: fld_set}]}, + "wifi_channel": {convert: to_long, to:[{field: "rsa.wireless.wlan_channel", setter: fld_set}]}, + "wlan": {to:[{field: "rsa.wireless.wlan_name", setter: fld_set}]}, + "word": {to:[{field: "rsa.internal.word", setter: fld_set}]}, + "workspace_desc": {to:[{field: "rsa.misc.workspace", setter: fld_set}]}, + "workstation": {to:[{field: "rsa.network.alias_host", setter: fld_append}]}, + "year": {to:[{field: "rsa.time.year", setter: fld_set}]}, + "zone": {to:[{field: "rsa.network.zone", setter: fld_set}]}, + }; + + function to_date(value) { + switch (typeof (value)) { + case "object": + // This is a Date. But as it was obtained from evt.Get(), the VM + // doesn't see it as a JS Date anymore, thus value instanceof Date === false. + // Have to trust that any object here is a valid Date for Go. + return value; + case "string": + var asDate = new Date(value); + if (!isNaN(asDate)) return asDate; + } + } + + // ECMAScript 5.1 doesn't have Object.MAX_SAFE_INTEGER / Object.MIN_SAFE_INTEGER. + var maxSafeInt = Math.pow(2, 53) - 1; + var minSafeInt = -maxSafeInt; + + function to_long(value) { + var num = parseInt(value); + // Better not to index a number if it's not safe (above 53 bits). + return !isNaN(num) && minSafeInt <= num && num <= maxSafeInt ? num : undefined; + } + + function to_ip(value) { + if (value.indexOf(":") === -1) + return to_ipv4(value); + return to_ipv6(value); + } + + var ipv4_regex = /^(\d+)\.(\d+)\.(\d+)\.(\d+)$/; + var ipv6_hex_regex = /^[0-9A-Fa-f]{1,4}$/; + + function to_ipv4(value) { + var result = ipv4_regex.exec(value); + if (result == null || result.length !== 5) return; + for (var i = 1; i < 5; i++) { + var num = strictToInt(result[i]); + if (isNaN(num) || num < 0 || num > 255) return; + } + return value; + } + + function to_ipv6(value) { + var sqEnd = value.indexOf("]"); + if (sqEnd > -1) { + if (value.charAt(0) !== "[") return; + value = value.substr(1, sqEnd - 1); + } + var zoneOffset = value.indexOf("%"); + if (zoneOffset > -1) { + value = value.substr(0, zoneOffset); + } + var parts = value.split(":"); + if (parts == null || parts.length < 3 || parts.length > 8) return; + var numEmpty = 0; + var innerEmpty = 0; + for (var i = 0; i < parts.length; i++) { + if (parts[i].length === 0) { + numEmpty++; + if (i > 0 && i + 1 < parts.length) innerEmpty++; + } else if (!parts[i].match(ipv6_hex_regex) && + // Accept an IPv6 with a valid IPv4 at the end. + ((i + 1 < parts.length) || !to_ipv4(parts[i]))) { + return; + } + } + return innerEmpty === 0 && parts.length === 8 || innerEmpty === 1 ? value : undefined; + } + + function to_double(value) { + return parseFloat(value); + } + + function to_mac(value) { + // ES doesn't have a mac datatype so it's safe to ingest whatever was captured. + return value; + } + + function to_lowercase(value) { + // to_lowercase is used against keyword fields, which can accept + // any other type (numbers, dates). + return typeof(value) === "string"? 
value.toLowerCase() : value; + } + + function fld_set(dst, value) { + dst[this.field] = { v: value }; + } + + function fld_append(dst, value) { + if (dst[this.field] === undefined) { + dst[this.field] = { v: [value] }; + } else { + var base = dst[this.field]; + if (base.v.indexOf(value)===-1) base.v.push(value); + } + } + + function fld_prio(dst, value) { + if (dst[this.field] === undefined) { + dst[this.field] = { v: value, prio: this.prio}; + } else if(this.prio < dst[this.field].prio) { + dst[this.field].v = value; + dst[this.field].prio = this.prio; + } + } + + var valid_ecs_outcome = { + 'failure': true, + 'success': true, + 'unknown': true + }; + + function fld_ecs_outcome(dst, value) { + value = value.toLowerCase(); + if (valid_ecs_outcome[value] === undefined) { + value = 'unknown'; + } + if (dst[this.field] === undefined) { + dst[this.field] = { v: value }; + } else if (dst[this.field].v === 'unknown') { + dst[this.field] = { v: value }; + } + } + + function map_all(evt, targets, value) { + for (var i = 0; i < targets.length; i++) { + evt.Put(targets[i], value); + } + } + + function populate_fields(evt) { + var base = evt.Get(FIELDS_OBJECT); + if (base === null) return; + alternate_datetime(evt); + if (map_ecs) { + do_populate(evt, base, ecs_mappings); + } + if (map_rsa) { + do_populate(evt, base, rsa_mappings); + } + if (keep_raw) { + evt.Put("rsa.raw", base); + } + evt.Delete(FIELDS_OBJECT); + } + + var datetime_alt_components = [ + {field: "day", fmts: [[dF]]}, + {field: "year", fmts: [[dW]]}, + {field: "month", fmts: [[dB],[dG]]}, + {field: "date", fmts: [[dW,dSkip,dG,dSkip,dF],[dW,dSkip,dB,dSkip,dF],[dW,dSkip,dR,dSkip,dF]]}, + {field: "hour", fmts: [[dN]]}, + {field: "min", fmts: [[dU]]}, + {field: "secs", fmts: [[dO]]}, + {field: "time", fmts: [[dN, dSkip, dU, dSkip, dO]]}, + ]; + + function alternate_datetime(evt) { + if (evt.Get(FIELDS_PREFIX + "event_time") != null) { + return; + } + var tzOffset = tz_offset; + if (tzOffset === "event") { + tzOffset = evt.Get("event.timezone"); + } + var container = new DateContainer(tzOffset); + for (var i=0; i} %{timezone}]||%{web_method}||%{web_host}||%{webpage}||%{web_query}||%{network_service}||%{resultcode}||%{sbytes}||%{web_referer}||%{user_agent}||%{web_cookie}", processor_chain([ + dup1, + dup2, + dup3, + dup4, + dup5, + dup6, + ])); + + var hdr1 = match("HEADER#0:0001", "message", "%APACHETOMCAT-%{level}-%{messageid}: %{payload}", processor_chain([ + setc("header_id","0001"), + ])); + + var hdr2 = match("HEADER#1:0002", "message", "%{hmonth->} %{hday->} %{htime->} %{hostname->} %APACHETOMCAT- %{messageid}: %{payload}", processor_chain([ + setc("header_id","0002"), + ])); + + var select1 = linear_select([ + hdr1, + hdr2, + ]); + + var msg1 = msg("ABCD", dup7); + + var msg2 = msg("BADMETHOD", dup7); + + var msg3 = msg("BADMTHD", dup7); + + var msg4 = msg("BDMTHD", dup7); + + var msg5 = msg("INDEX", dup7); + + var msg6 = msg("CFYZ", dup7); + + var msg7 = msg("CONNECT", dup7); + + var msg8 = msg("DELETE", dup7); + + var msg9 = msg("DETECT_METHOD_TYPE", dup7); + + var msg10 = msg("FGET", dup7); + + var msg11 = msg("GET", dup7); + + var msg12 = msg("get", dup7); + + var msg13 = msg("HEAD", dup7); + + var msg14 = msg("id", dup7); + + var msg15 = msg("LOCK", dup7); + + var msg16 = msg("MKCOL", dup7); + + var msg17 = msg("NCIRCLE", dup7); + + var msg18 = msg("OPTIONS", dup7); + + var msg19 = msg("POST", dup7); + + var msg20 = msg("PRONECT", dup7); + + var msg21 = msg("PROPFIND", dup7); + + var msg22 = msg("PUT", dup7); + + var msg23 = 
msg("QUALYS", dup7); + + var msg24 = msg("SEARCH", dup7); + + var msg25 = msg("TRACK", dup7); + + var msg26 = msg("TRACE", dup7); + + var msg27 = msg("uGET", dup7); + + var msg28 = msg("null", dup7); + + var msg29 = msg("rndmmtd", dup7); + + var msg30 = msg("RNDMMTD", dup7); + + var msg31 = msg("asdf", dup7); + + var msg32 = msg("DEBUG", dup7); + + var msg33 = msg("COOK", dup7); + + var msg34 = msg("nGET", dup7); + + var chain1 = processor_chain([ + select1, + msgid_select({ + "ABCD": msg1, + "BADMETHOD": msg2, + "BADMTHD": msg3, + "BDMTHD": msg4, + "CFYZ": msg6, + "CONNECT": msg7, + "COOK": msg33, + "DEBUG": msg32, + "DELETE": msg8, + "DETECT_METHOD_TYPE": msg9, + "FGET": msg10, + "GET": msg11, + "HEAD": msg13, + "INDEX": msg5, + "LOCK": msg15, + "MKCOL": msg16, + "NCIRCLE": msg17, + "OPTIONS": msg18, + "POST": msg19, + "PRONECT": msg20, + "PROPFIND": msg21, + "PUT": msg22, + "QUALYS": msg23, + "RNDMMTD": msg30, + "SEARCH": msg24, + "TRACE": msg26, + "TRACK": msg25, + "asdf": msg31, + "get": msg12, + "id": msg14, + "nGET": msg34, + "null": msg28, + "rndmmtd": msg29, + "uGET": msg27, + }), + ]); + + var part1 = match("MESSAGE#0:ABCD", "nwparser.payload", "%{saddr}||%{fld5}||%{username}||[%{fld7->} %{timezone}]||%{web_method}||%{web_host}||%{webpage}||%{web_query}||%{network_service}||%{resultcode}||%{sbytes}||%{web_referer}||%{user_agent}||%{web_cookie}", processor_chain([ + dup1, + dup2, + dup3, + dup4, + dup5, + dup6, + ])); + - community_id: null + - registered_domain: + field: dns.question.name + ignore_failure: true + ignore_missing: true + target_etld_field: dns.question.top_level_domain + target_field: dns.question.registered_domain + target_subdomain_field: dns.question.subdomain + - registered_domain: + field: client.domain + ignore_failure: true + ignore_missing: true + target_etld_field: client.top_level_domain + target_field: client.registered_domain + target_subdomain_field: client.subdomain + - registered_domain: + field: server.domain + ignore_failure: true + ignore_missing: true + target_etld_field: server.top_level_domain + target_field: server.registered_domain + target_subdomain_field: server.subdomain + - registered_domain: + field: destination.domain + ignore_failure: true + ignore_missing: true + target_etld_field: destination.top_level_domain + target_field: destination.registered_domain + target_subdomain_field: destination.subdomain + - registered_domain: + field: source.domain + ignore_failure: true + ignore_missing: true + target_etld_field: source.top_level_domain + target_field: source.registered_domain + target_subdomain_field: source.subdomain + - registered_domain: + field: url.domain + ignore_failure: true + ignore_missing: true + target_etld_field: url.top_level_domain + target_field: url.registered_domain + target_subdomain_field: url.subdomain + - add_locale: null + tags: + - tomcat-log + - forwarded + tcp: null + data_stream.namespace: default + - name: filestream-tomcat + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.tomcat.log.enabled} == true and ${kubernetes.hints.tomcat.enabled} == true + data_stream: + dataset: tomcat.log + type: logs + exclude_files: + - .gz$ + fields: + observer: + product: TomCat + type: Web + vendor: Apache + fields_under_root: true + parsers: + - container: + format: auto + stream: ${kubernetes.hints.tomcat.log.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + processors: + - script: + lang: javascript + params: + debug: false + ecs: true + keep_raw: 
false + rsa: true + tz_offset: local + source: | + // Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + // or more contributor license agreements. Licensed under the Elastic License; + // you may not use this file except in compliance with the Elastic License. + + /* jshint -W014,-W016,-W097,-W116 */ + + var processor = require("processor"); + var console = require("console"); + + var FLAG_FIELD = "log.flags"; + var FIELDS_OBJECT = "nwparser"; + var FIELDS_PREFIX = FIELDS_OBJECT + "."; + + var defaults = { + debug: false, + ecs: true, + rsa: false, + keep_raw: false, + tz_offset: "local", + strip_priority: true + }; + + var saved_flags = null; + var debug; + var map_ecs; + var map_rsa; + var keep_raw; + var device; + var tz_offset; + var strip_priority; + + // Register params from configuration. + function register(params) { + debug = params.debug !== undefined ? params.debug : defaults.debug; + map_ecs = params.ecs !== undefined ? params.ecs : defaults.ecs; + map_rsa = params.rsa !== undefined ? params.rsa : defaults.rsa; + keep_raw = params.keep_raw !== undefined ? params.keep_raw : defaults.keep_raw; + tz_offset = parse_tz_offset(params.tz_offset !== undefined? params.tz_offset : defaults.tz_offset); + strip_priority = params.strip_priority !== undefined? params.strip_priority : defaults.strip_priority; + device = new DeviceProcessor(); + } + + function parse_tz_offset(offset) { + var date; + var m; + switch(offset) { + // local uses the tz offset from the JS VM. + case "local": + date = new Date(); + // Reversing the sign as we the offset from UTC, not to UTC. + return parse_local_tz_offset(-date.getTimezoneOffset()); + // event uses the tz offset from event.timezone (add_locale processor). + case "event": + return offset; + // Otherwise a tz offset in the form "[+-][0-9]{4}" is required. + default: + m = offset.match(/^([+\-])([0-9]{2}):?([0-9]{2})?$/); + if (m === null || m.length !== 4) { + throw("bad timezone offset: '" + offset + "'. Must have the form +HH:MM"); + } + return m[1] + m[2] + ":" + (m[3]!==undefined? m[3] : "00"); + } + } + + function parse_local_tz_offset(minutes) { + var neg = minutes < 0; + minutes = Math.abs(minutes); + var min = minutes % 60; + var hours = Math.floor(minutes / 60); + var pad2digit = function(n) { + if (n < 10) { return "0" + n;} + return "" + n; + }; + return (neg? "-" : "+") + pad2digit(hours) + ":" + pad2digit(min); + } + + function process(evt) { + // Function register is only called by the processor when `params` are set + // in the processor config. + if (device === undefined) { + register(defaults); + } + return device.process(evt); + } + + function processor_chain(subprocessors) { + var builder = new processor.Chain(); + subprocessors.forEach(builder.Add); + return builder.Build().Run; + } + + function linear_select(subprocessors) { + return function (evt) { + var flags = evt.Get(FLAG_FIELD); + var i; + for (i = 0; i < subprocessors.length; i++) { + evt.Delete(FLAG_FIELD); + if (debug) console.warn("linear_select trying entry " + i); + subprocessors[i](evt); + // Dissect processor succeeded? 
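+ // Dissect signals failure by setting log.flags rather than raising an
+ // error, so a null flag after subprocessors[i] ran means the pattern
+ // matched and the remaining alternatives can be skipped.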
+ if (evt.Get(FLAG_FIELD) == null) break; + if (debug) console.warn("linear_select failed entry " + i); + } + if (flags !== null) { + evt.Put(FLAG_FIELD, flags); + } + if (debug) { + if (i < subprocessors.length) { + console.warn("linear_select matched entry " + i); + } else { + console.warn("linear_select didn't match"); + } + } + }; + } + + function conditional(opt) { + return function(evt) { + if (opt.if(evt)) { + opt.then(evt); + } else if (opt.else) { + opt.else(evt); + } + }; + } + + var strip_syslog_priority = (function() { + var isEnabled = function() { return strip_priority === true; }; + var fetchPRI = field("_pri"); + var fetchPayload = field("payload"); + var removePayload = remove(["payload"]); + var cleanup = remove(["_pri", "payload"]); + var onMatch = function(evt) { + var pri, priStr = fetchPRI(evt); + if (priStr != null + && 0 < priStr.length && priStr.length < 4 + && !isNaN((pri = Number(priStr))) + && 0 <= pri && pri < 192) { + var severity = pri & 7, + facility = pri >> 3; + setc("_severity", "" + severity)(evt); + setc("_facility", "" + facility)(evt); + // Replace message with priority stripped. + evt.Put("message", fetchPayload(evt)); + removePayload(evt); + } else { + // not a valid syslog PRI, cleanup. + cleanup(evt); + } + }; + return conditional({ + if: isEnabled, + then: cleanup_flags(match( + "STRIP_PRI", + "message", + "<%{_pri}>%{payload}", + onMatch + )) + }); + })(); + + function match(id, src, pattern, on_success) { + var dissect = new processor.Dissect({ + field: src, + tokenizer: pattern, + target_prefix: FIELDS_OBJECT, + ignore_failure: true, + overwrite_keys: true, + trim_values: "right" + }); + return function (evt) { + var msg = evt.Get(src); + dissect.Run(evt); + var failed = evt.Get(FLAG_FIELD) != null; + if (debug) { + if (failed) { + console.debug("dissect fail: " + id + " field:" + src); + } else { + console.debug("dissect OK: " + id + " field:" + src); + } + console.debug(" expr: <<" + pattern + ">>"); + console.debug(" input: <<" + msg + ">>"); + } + if (on_success != null && !failed) { + on_success(evt); + } + }; + } + + function match_copy(id, src, dst, on_success) { + dst = FIELDS_PREFIX + dst; + if (dst === FIELDS_PREFIX || dst === src) { + return function (evt) { + if (debug) { + console.debug("noop OK: " + id + " field:" + src); + console.debug(" input: <<" + evt.Get(src) + ">>"); + } + if (on_success != null) on_success(evt); + } + } + return function (evt) { + var msg = evt.Get(src); + evt.Put(dst, msg); + if (debug) { + console.debug("copy OK: " + id + " field:" + src); + console.debug(" target: '" + dst + "'"); + console.debug(" input: <<" + msg + ">>"); + } + if (on_success != null) on_success(evt); + } + } + + function cleanup_flags(processor) { + return function(evt) { + processor(evt); + evt.Delete(FLAG_FIELD); + }; + } + + function all_match(opts) { + return function (evt) { + var i; + for (i = 0; i < opts.processors.length; i++) { + evt.Delete(FLAG_FIELD); + opts.processors[i](evt); + // Dissect processor succeeded? 
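+ // Unlike linear_select, all_match requires every subprocessor to match:
+ // a non-null flag after any of them aborts the chain through on_failure.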
+ if (evt.Get(FLAG_FIELD) != null) { + if (debug) console.warn("all_match failure at " + i); + if (opts.on_failure != null) opts.on_failure(evt); + return; + } + if (debug) console.warn("all_match success at " + i); + } + if (opts.on_success != null) opts.on_success(evt); + }; + } + + function msgid_select(mapping) { + return function (evt) { + var msgid = evt.Get(FIELDS_PREFIX + "messageid"); + if (msgid == null) { + if (debug) console.warn("msgid_select: no messageid captured!"); + return; + } + var next = mapping[msgid]; + if (next === undefined) { + if (debug) console.warn("msgid_select: no mapping for messageid:" + msgid); + return; + } + if (debug) console.info("msgid_select: matched key=" + msgid); + return next(evt); + }; + } + + function msg(msg_id, match) { + return function (evt) { + match(evt); + if (evt.Get(FLAG_FIELD) == null) { + evt.Put(FIELDS_PREFIX + "msg_id1", msg_id); + } + }; + } + + var start; + + function save_flags(evt) { + saved_flags = evt.Get(FLAG_FIELD); + evt.Put("event.original", evt.Get("message")); + } + + function restore_flags(evt) { + if (saved_flags !== null) { + evt.Put(FLAG_FIELD, saved_flags); + } + evt.Delete("message"); + } + + function constant(value) { + return function (evt) { + return value; + }; + } + + function field(name) { + var fullname = FIELDS_PREFIX + name; + return function (evt) { + return evt.Get(fullname); + }; + } + + function STRCAT(args) { + var s = ""; + var i; + for (i = 0; i < args.length; i++) { + s += args[i]; + } + return s; + } + + // TODO: Implement + function DIRCHK(args) { + unimplemented("DIRCHK"); + } + + function strictToInt(str) { + return str * 1; + } + + function CALC(args) { + if (args.length !== 3) { + console.warn("skipped call to CALC with " + args.length + " arguments."); + return; + } + var a = strictToInt(args[0]); + var b = strictToInt(args[2]); + if (isNaN(a) || isNaN(b)) { + console.warn("failed evaluating CALC arguments a='" + args[0] + "' b='" + args[2] + "'."); + return; + } + var result; + switch (args[1]) { + case "+": + result = a + b; + break; + case "-": + result = a - b; + break; + case "*": + result = a * b; + break; + default: + // Only * and + seen in the parsers. + console.warn("unknown CALC operation '" + args[1] + "'."); + return; + } + // Always return a string + return result !== undefined ? "" + result : result; + } + + var quoteChars = "\"'`"; + function RMQ(args) { + if(args.length !== 1) { + console.warn("RMQ: only one argument expected"); + return; + } + var value = args[0].trim(); + var n = value.length; + var char; + return n > 1 + && (char=value.charAt(0)) === value.charAt(n-1) + && quoteChars.indexOf(char) !== -1? 
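+ // Only strip the outer characters when the value is longer than one
+ // character and starts and ends with the same quoting character.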
+ value.substr(1, n-2) + : value; + } + + function call(opts) { + var args = new Array(opts.args.length); + return function (evt) { + for (var i = 0; i < opts.args.length; i++) + if ((args[i] = opts.args[i](evt)) == null) return; + var result = opts.fn(args); + if (result != null) { + evt.Put(opts.dest, result); + } + }; + } + + function nop(evt) { + } + + function appendErrorMsg(evt, msg) { + var value = evt.Get("error.message"); + if (value == null) { + value = [msg]; + } else if (msg instanceof Array) { + value.push(msg); + } else { + value = [value, msg]; + } + evt.Put("error.message", value); + } + + function unimplemented(name) { + appendErrorMsg("unimplemented feature: " + name); + } + + function lookup(opts) { + return function (evt) { + var key = opts.key(evt); + if (key == null) return; + var value = opts.map.keyvaluepairs[key]; + if (value === undefined) { + value = opts.map.default; + } + if (value !== undefined) { + evt.Put(opts.dest, value(evt)); + } + }; + } + + function set(fields) { + return new processor.AddFields({ + target: FIELDS_OBJECT, + fields: fields, + }); + } + + function setf(dst, src) { + return function (evt) { + var val = evt.Get(FIELDS_PREFIX + src); + if (val != null) evt.Put(FIELDS_PREFIX + dst, val); + }; + } + + function setc(dst, value) { + return function (evt) { + evt.Put(FIELDS_PREFIX + dst, value); + }; + } + + function set_field(opts) { + return function (evt) { + var val = opts.value(evt); + if (val != null) evt.Put(opts.dest, val); + }; + } + + function dump(label) { + return function (evt) { + console.log("Dump of event at " + label + ": " + JSON.stringify(evt, null, "\t")); + }; + } + + function date_time_join_args(evt, arglist) { + var str = ""; + for (var i = 0; i < arglist.length; i++) { + var fname = FIELDS_PREFIX + arglist[i]; + var val = evt.Get(fname); + if (val != null) { + if (str !== "") str += " "; + str += val; + } else { + if (debug) console.warn("in date_time: input arg " + fname + " is not set"); + } + } + return str; + } + + function to2Digit(num) { + return num? (num < 10? "0" + num : num) : "00"; + } + + // Make two-digit dates 00-69 interpreted as 2000-2069 + // and dates 70-99 translated to 1970-1999. + var twoDigitYearEpoch = 70; + var twoDigitYearCentury = 2000; + + // This is to accept dates up to 2 days in the future, only used when + // no year is specified in a date. 2 days should be enough to account for + // time differences between systems and different tz offsets. + var maxFutureDelta = 2*24*60*60*1000; + + // DateContainer stores date fields and then converts those fields into + // a Date. Necessary because building a Date using its set() methods gives + // different results depending on the order of components. + function DateContainer(tzOffset) { + this.offset = tzOffset === undefined? "Z" : tzOffset; + } + + DateContainer.prototype = { + setYear: function(v) {this.year = v;}, + setMonth: function(v) {this.month = v;}, + setDay: function(v) {this.day = v;}, + setHours: function(v) {this.hours = v;}, + setMinutes: function(v) {this.minutes = v;}, + setSeconds: function(v) {this.seconds = v;}, + + setUNIX: function(v) {this.unix = v;}, + + set2DigitYear: function(v) { + this.year = v < twoDigitYearEpoch? twoDigitYearCentury + v : twoDigitYearCentury + v - 100; + }, + + toDate: function() { + if (this.unix !== undefined) { + return new Date(this.unix * 1000); + } + if (this.day === undefined || this.month === undefined) { + // Can't make a date from this. 
+ // Make two-digit dates 00-69 interpreted as 2000-2069
+ // and dates 70-99 translated to 1970-1999.
+ var twoDigitYearEpoch = 70;
+ var twoDigitYearCentury = 2000;
+
+ // This is to accept dates up to 2 days in the future, only used when
+ // no year is specified in a date. 2 days should be enough to account for
+ // time differences between systems and different tz offsets.
+ var maxFutureDelta = 2*24*60*60*1000;
+
+ // DateContainer stores date fields and then converts those fields into
+ // a Date. Necessary because building a Date using its set() methods gives
+ // different results depending on the order of components.
+ function DateContainer(tzOffset) {
+     this.offset = tzOffset === undefined? "Z" : tzOffset;
+ }
+
+ DateContainer.prototype = {
+     setYear: function(v) {this.year = v;},
+     setMonth: function(v) {this.month = v;},
+     setDay: function(v) {this.day = v;},
+     setHours: function(v) {this.hours = v;},
+     setMinutes: function(v) {this.minutes = v;},
+     setSeconds: function(v) {this.seconds = v;},
+
+     setUNIX: function(v) {this.unix = v;},
+
+     set2DigitYear: function(v) {
+         this.year = v < twoDigitYearEpoch? twoDigitYearCentury + v : twoDigitYearCentury + v - 100;
+     },
+
+     toDate: function() {
+         if (this.unix !== undefined) {
+             return new Date(this.unix * 1000);
+         }
+         if (this.day === undefined || this.month === undefined) {
+             // Can't make a date from this.
+             return undefined;
+         }
+         if (this.year === undefined) {
+             // A date without a year. Set current year, or previous year
+             // if date would be in the future.
+             var now = new Date();
+             this.year = now.getFullYear();
+             var date = this.toDate();
+             if (date.getTime() - now.getTime() > maxFutureDelta) {
+                 date.setFullYear(now.getFullYear() - 1);
+             }
+             return date;
+         }
+         var MM = to2Digit(this.month);
+         var DD = to2Digit(this.day);
+         var hh = to2Digit(this.hours);
+         var mm = to2Digit(this.minutes);
+         var ss = to2Digit(this.seconds);
+         return new Date(this.year + "-" + MM + "-" + DD + "T" + hh + ":" + mm + ":" + ss + this.offset);
+     }
+ };
+
+ function date_time_try_pattern(fmt, str, tzOffset) {
+     var date = new DateContainer(tzOffset);
+     var pos = date_time_try_pattern_at_pos(fmt, str, 0, date);
+     return pos !== undefined? date.toDate() : undefined;
+ }
+
+ function date_time_try_pattern_at_pos(fmt, str, pos, date) {
+     var len = str.length;
+     for (var proc = 0; pos !== undefined && pos < len && proc < fmt.length; proc++) {
+         pos = fmt[proc](str, pos, date);
+     }
+     return pos;
+ }
+
+ function date_time(opts) {
+     return function (evt) {
+         var tzOffset = opts.tz || tz_offset;
+         if (tzOffset === "event") {
+             tzOffset = evt.Get("event.timezone");
+         }
+         var str = date_time_join_args(evt, opts.args);
+         for (var i = 0; i < opts.fmts.length; i++) {
+             var date = date_time_try_pattern(opts.fmts[i], str, tzOffset);
+             if (date !== undefined) {
+                 evt.Put(FIELDS_PREFIX + opts.dest, date);
+                 return;
+             }
+         }
+         if (debug) console.warn("in date_time: id=" + opts.id + " FAILED: " + str);
+     };
+ }
+
+ var uA = 60 * 60 * 24;
+ var uD = 60 * 60 * 24;
+ var uF = 60 * 60;
+ var uG = 60 * 60 * 24 * 30;
+ var uH = 60 * 60;
+ var uI = 60 * 60;
+ var uJ = 60 * 60 * 24;
+ var uM = 60 * 60 * 24 * 30;
+ var uN = 60 * 60;
+ var uO = 1;
+ var uS = 1;
+ var uT = 60;
+ var uU = 60;
+ var uc = dc;
+
+ function duration(opts) {
+     return function(evt) {
+         var str = date_time_join_args(evt, opts.args);
+         for (var i = 0; i < opts.fmts.length; i++) {
+             var seconds = duration_try_pattern(opts.fmts[i], str);
+             if (seconds !== undefined) {
+                 evt.Put(FIELDS_PREFIX + opts.dest, seconds);
+                 return;
+             }
+         }
+         if (debug) console.warn("in duration: id=" + opts.id + " (s) FAILED: " + str);
+     };
+ }
+
+ function duration_try_pattern(fmt, str) {
+     var secs = 0;
+     var pos = 0;
+     for (var i=0; i<fmt.length; i++) {
+         var unit = fmt[i];
+         if (typeof unit === "number") {
+             // A numeric entry is a unit multiplier (see uA..uU above):
+             // read an integer and accumulate it as seconds.
+             var start = skipws(str, pos);
+             pos = skipdigits(str, start);
+             if (pos === start) return;
+             secs += parseInt(str.substr(start, pos - start), 10) * unit;
+         } else {
+             // Anything else is a matcher such as uc(":") that just
+             // advances the position.
+             pos = unit(str, pos);
+             if (pos === undefined) return;
+         }
+     }
+     return secs;
+ }
+
+ var shortMonths = {
+     // mon => [ month_id , how many chars to skip if month in long form ]
+     "Jan": [0, 4],
+     "Feb": [1, 5],
+     "Mar": [2, 2],
+     "Apr": [3, 2],
+     "May": [4, 0],
+     "Jun": [5, 1],
+     "Jul": [6, 1],
+     "Aug": [7, 3],
+     "Sep": [8, 6],
+     "Oct": [9, 4],
+     "Nov": [10, 5],
+     "Dec": [11, 4],
+     "jan": [0, 4],
+     "feb": [1, 5],
+     "mar": [2, 2],
+     "apr": [3, 2],
+     "may": [4, 0],
+     "jun": [5, 1],
+     "jul": [6, 1],
+     "aug": [7, 3],
+     "sep": [8, 6],
+     "oct": [9, 4],
+     "nov": [10, 5],
+     "dec": [11, 4],
+ };
+
+ // var dC = undefined;
+ var dR = dateMonthName(true);
+ var dB = dateMonthName(false);
+ var dM = dateFixedWidthNumber("M", 2, 1, 12, DateContainer.prototype.setMonth);
+ var dG = dateVariableWidthNumber("G", 1, 12, DateContainer.prototype.setMonth);
+ var dD = dateFixedWidthNumber("D", 2, 1, 31, DateContainer.prototype.setDay);
+ var dF = dateVariableWidthNumber("F", 1, 31, DateContainer.prototype.setDay);
+ var dH = dateFixedWidthNumber("H", 2, 0, 24, DateContainer.prototype.setHours);
+ var dI = dateVariableWidthNumber("I", 0, 24, DateContainer.prototype.setHours); // Accept hours >12
+ var dN = dateVariableWidthNumber("N", 0, 24, DateContainer.prototype.setHours);
+ var dT = dateFixedWidthNumber("T", 2, 0, 59, DateContainer.prototype.setMinutes);
+ var dU = dateVariableWidthNumber("U", 0, 59, DateContainer.prototype.setMinutes);
+ var dP = parseAMPM; // AM|PM
+ var dQ = parseAMPM; // A.M.|P.M.
+ var dS = dateFixedWidthNumber("S", 2, 0, 60, DateContainer.prototype.setSeconds);
+ var dO = dateVariableWidthNumber("O", 0, 60, DateContainer.prototype.setSeconds);
+ var dY = dateFixedWidthNumber("Y", 2, 0, 99, DateContainer.prototype.set2DigitYear);
+ var dW = dateFixedWidthNumber("W", 4, 1000, 9999, DateContainer.prototype.setYear);
+ var dZ = parseHMS;
+ var dX = dateVariableWidthNumber("X", 0, 0x10000000000, DateContainer.prototype.setUNIX);
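+ // Illustrative example (not from the original file; format arrays are
+ // normally emitted by the generated parsers): a pattern like
+ //   [dW, dc("-"), dG, dc("-"), dF, dN, dc(":"), dU, dc(":"), dO]
+ // fills a DateContainer from "2017-04-07 18:05:07" (4-digit year,
+ // variable-width month and day, then hours:minutes:seconds), after which
+ // date_time_try_pattern() converts it to a Date using the tz offset.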
dateFixedWidthNumber("T", 2, 0, 59, DateContainer.prototype.setMinutes); + var dU = dateVariableWidthNumber("U", 0, 59, DateContainer.prototype.setMinutes); + var dP = parseAMPM; // AM|PM + var dQ = parseAMPM; // A.M.|P.M + var dS = dateFixedWidthNumber("S", 2, 0, 60, DateContainer.prototype.setSeconds); + var dO = dateVariableWidthNumber("O", 0, 60, DateContainer.prototype.setSeconds); + var dY = dateFixedWidthNumber("Y", 2, 0, 99, DateContainer.prototype.set2DigitYear); + var dW = dateFixedWidthNumber("W", 4, 1000, 9999, DateContainer.prototype.setYear); + var dZ = parseHMS; + var dX = dateVariableWidthNumber("X", 0, 0x10000000000, DateContainer.prototype.setUNIX); + + // parseAMPM parses "A.M", "AM", "P.M", "PM" from logs. + // Only works if this modifier appears after the hour has been read from logs + // which is always the case in the 300 devices. + function parseAMPM(str, pos, date) { + var n = str.length; + var start = skipws(str, pos); + if (start + 2 > n) return; + var head = str.substr(start, 2).toUpperCase(); + var isPM = false; + var skip = false; + switch (head) { + case "A.": + skip = true; + /* falls through */ + case "AM": + break; + case "P.": + skip = true; + /* falls through */ + case "PM": + isPM = true; + break; + default: + if (debug) console.warn("can't parse pos " + start + " as AM/PM: " + str + "(head:" + head + ")"); + return; + } + pos = start + 2; + if (skip) { + if (pos+2 > n || str.substr(pos, 2).toUpperCase() !== "M.") { + if (debug) console.warn("can't parse pos " + start + " as AM/PM: " + str + "(tail)"); + return; + } + pos += 2; + } + var hh = date.hours; + if (isPM) { + // Accept existing hour in 24h format. + if (hh < 12) hh += 12; + } else { + if (hh === 12) hh = 0; + } + date.setHours(hh); + return pos; + } + + function parseHMS(str, pos, date) { + return date_time_try_pattern_at_pos([dN, dc(":"), dU, dc(":"), dO], str, pos, date); + } + + function skipws(str, pos) { + for ( var n = str.length; + pos < n && str.charAt(pos) === " "; + pos++) + ; + return pos; + } + + function skipdigits(str, pos) { + var c; + for (var n = str.length; + pos < n && (c = str.charAt(pos)) >= "0" && c <= "9"; + pos++) + ; + return pos; + } + + function dSkip(str, pos, date) { + var chr; + for (;pos < str.length && (chr=str[pos])<'0' || chr>'9'; pos++) {} + return pos < str.length? pos : undefined; + } + + function dateVariableWidthNumber(fmtChar, min, max, setter) { + return function (str, pos, date) { + var start = skipws(str, pos); + pos = skipdigits(str, start); + var s = str.substr(start, pos - start); + var value = parseInt(s, 10); + if (value >= min && value <= max) { + setter.call(date, value); + return pos; + } + return; + }; + } + + function dateFixedWidthNumber(fmtChar, width, min, max, setter) { + return function (str, pos, date) { + pos = skipws(str, pos); + var n = str.length; + if (pos + width > n) return; + var s = str.substr(pos, width); + var value = parseInt(s, 10); + if (value >= min && value <= max) { + setter.call(date, value); + return pos + width; + } + return; + }; + } + + // Short month name (Jan..Dec). 
+ function dateMonthName(long) { + return function (str, pos, date) { + pos = skipws(str, pos); + var n = str.length; + if (pos + 3 > n) return; + var mon = str.substr(pos, 3); + var idx = shortMonths[mon]; + if (idx === undefined) { + idx = shortMonths[mon.toLowerCase()]; + } + if (idx === undefined) { + //console.warn("parsing date_time: '" + mon + "' is not a valid short month (%B)"); + return; + } + date.setMonth(idx[0]+1); + return pos + 3 + (long ? idx[1] : 0); + }; + } + + function url_wrapper(dst, src, fn) { + return function(evt) { + var value = evt.Get(FIELDS_PREFIX + src), result; + if (value != null && (result = fn(value))!== undefined) { + evt.Put(FIELDS_PREFIX + dst, result); + } else { + console.debug(fn.name + " failed for '" + value + "'"); + } + }; + } + + // The following regular expression for parsing URLs from: + // https://github.com/wizard04wsu/URI_Parsing + // + // The MIT License (MIT) + // + // Copyright (c) 2014 Andrew Harrison + // + // Permission is hereby granted, free of charge, to any person obtaining a copy of + // this software and associated documentation files (the "Software"), to deal in + // the Software without restriction, including without limitation the rights to + // use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + // the Software, and to permit persons to whom the Software is furnished to do so, + // subject to the following conditions: + // + // The above copyright notice and this permission notice shall be included in all + // copies or substantial portions of the Software. + // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + // FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + // COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + // IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + // CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + var uriRegExp = /^([a-z][a-z0-9+.\-]*):(?:\/\/((?:(?=((?:[a-z0-9\-._~!$&'()*+,;=:]|%[0-9A-F]{2})*))(\3)@)?(?=(\[[0-9A-F:.]{2,}\]|(?:[a-z0-9\-._~!$&'()*+,;=]|%[0-9A-F]{2})*))\5(?::(?=(\d*))\6)?)(\/(?=((?:[a-z0-9-._~!$&'()*+,;=:@\/]|%[0-9A-F]{2})*))\8)?|(\/?(?!\/)(?=((?:[a-z0-9-._~!$&'()*+,;=:@\/]|%[0-9A-F]{2})*))\10)?)(?:\?(?=((?:[a-z0-9-._~!$&'()*+,;=:@\/?]|%[0-9A-F]{2})*))\11)?(?:#(?=((?:[a-z0-9-._~!$&'()*+,;=:@\/?]|%[0-9A-F]{2})*))\12)?$/i; + + var uriScheme = 1; + var uriDomain = 5; + var uriPort = 6; + var uriPath = 7; + var uriPathAlt = 9; + var uriQuery = 11; + + function domain(dst, src) { + return url_wrapper(dst, src, extract_domain); + } + + function split_url(value) { + var m = value.match(uriRegExp); + if (m && m[uriDomain]) return m; + // Support input in the form "www.example.net/path", but not "/path". + m = ("null://" + value).match(uriRegExp); + if (m) return m; + } + + function extract_domain(value) { + var m = split_url(value); + if (m && m[uriDomain]) return m[uriDomain]; + } + + var extFromPage = /\.[^.]+$/; + function extract_ext(value) { + var page = extract_page(value); + if (page) { + var m = page.match(extFromPage); + if (m) return m[0]; + } + } + + function ext(dst, src) { + return url_wrapper(dst, src, extract_ext); + } + + function fqdn(dst, src) { + // TODO: fqdn and domain(eTLD+1) are currently the same. 
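+ // Illustrative example (not from the original file): the domain() mapping
+ // reused here yields "example.net" for "https://example.net:8443/a/b?q=1";
+ // the "null://" fallback in split_url also lets schemeless input such as
+ // "www.example.net/path" parse.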
+ return domain(dst, src);
+ }
+
+ var pageFromPathRegExp = /\/([^\/]+)$/;
+ var pageName = 1;
+
+ function extract_page(value) {
+     value = extract_path(value);
+     if (!value) return undefined;
+     var m = value.match(pageFromPathRegExp);
+     if (m) return m[pageName];
+ }
+
+ function page(dst, src) {
+     return url_wrapper(dst, src, extract_page);
+ }
+
+ function extract_path(value) {
+     var m = split_url(value);
+     return m? m[uriPath] || m[uriPathAlt] : undefined;
+ }
+
+ function path(dst, src) {
+     return url_wrapper(dst, src, extract_path);
+ }
+
+ // Map common schemes to their default port.
+ // port has to be a string (will be converted at a later stage).
+ var schemePort = {
+     "ftp": "21",
+     "ssh": "22",
+     "http": "80",
+     "https": "443",
+ };
+
+ function extract_port(value) {
+     var m = split_url(value);
+     if (!m) return undefined;
+     if (m[uriPort]) return m[uriPort];
+     if (m[uriScheme]) {
+         return schemePort[m[uriScheme]];
+     }
+ }
+
+ function port(dst, src) {
+     return url_wrapper(dst, src, extract_port);
+ }
+
+ function extract_query(value) {
+     var m = split_url(value);
+     if (m && m[uriQuery]) return m[uriQuery];
+ }
+
+ function query(dst, src) {
+     return url_wrapper(dst, src, extract_query);
+ }
+
+ function extract_root(value) {
+     var m = split_url(value);
+     if (m && m[uriDomain]) {
+         var scheme = m[uriScheme] && m[uriScheme] !== "null"?
+             m[uriScheme] + "://" : "";
+         var port = m[uriPort]? ":" + m[uriPort] : "";
+         return scheme + m[uriDomain] + port;
+     }
+ }
+
+ function root(dst, src) {
+     return url_wrapper(dst, src, extract_root);
+ }
+
+ function tagval(id, src, cfg, keys, on_success) {
+     var fail = function(evt) {
+         evt.Put(FLAG_FIELD, "tagval_parsing_error");
+     };
+     if (cfg.kv_separator.length !== 1) {
+         throw("Invalid TAGVALMAP ValueDelimiter (must have 1 character)");
+     }
+     var quotes_len = cfg.open_quote.length > 0 && cfg.close_quote.length > 0?
+         cfg.open_quote.length + cfg.close_quote.length : 0;
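+     // Illustrative example (not from the original file; cfg and keys come
+     // from the generated parser): with pair_separator ";;", kv_separator
+     // "=", open/close quote '"', and keys {"src": "saddr"}, the input
+     //   src="10.0.0.1";;other=x
+     // stores "10.0.0.1" (quotes stripped) under FIELDS_PREFIX + "saddr"
+     // and skips the unmapped "other" key.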
+     var kv_regex = new RegExp('^([^' + cfg.kv_separator + ']*)*' + cfg.kv_separator + ' *(.*)*$');
+     return function(evt) {
+         var msg = evt.Get(src);
+         if (msg === undefined) {
+             console.warn("tagval: input field is missing");
+             return fail(evt);
+         }
+         var pairs = msg.split(cfg.pair_separator);
+         var i;
+         var success = false;
+         var prev = "";
+         for (i=0; i<pairs.length; i++) {
+             var m = pairs[i].match(kv_regex);
+             if (m === null) {
+                 // No key-value separator in this fragment; carry it over
+                 // and prepend it to the next parsed value.
+                 prev += pairs[i] + cfg.pair_separator;
+                 continue;
+             }
+             var key = m[1].trim();
+             var value = (prev + m[2]).trim();
+             prev = "";
+             var field = keys[key];
+             if (field === undefined) continue;
+             if (quotes_len > 0 &&
+                 value.length >= cfg.open_quote.length + cfg.close_quote.length &&
+                 value.substr(0, cfg.open_quote.length) === cfg.open_quote &&
+                 value.substr(value.length - cfg.close_quote.length) === cfg.close_quote) {
+                 value = value.substr(cfg.open_quote.length, value.length - quotes_len);
+             }
+             evt.Put(FIELDS_PREFIX + field, value);
+             success = true;
+         }
+         if (!success) {
+             return fail(evt);
+         }
+         if (on_success != null) {
+             on_success(evt);
+         }
+     }
+ }
+
+ var ecs_mappings = {
+     "_facility": {convert: to_long, to:[{field: "log.syslog.facility.code", setter: fld_set}]},
+     "_pri": {convert: to_long, to:[{field: "log.syslog.priority", setter: fld_set}]},
+     "_severity": {convert: to_long, to:[{field: "log.syslog.severity.code", setter: fld_set}]},
+     "action": {to:[{field: "event.action", setter: fld_prio, prio: 0}]},
+     "administrator": {to:[{field: "related.user", setter: fld_append},{field: "user.name", setter: fld_prio, prio: 4}]},
+     "alias.ip": {convert: to_ip, to:[{field: "host.ip", setter: fld_prio, prio: 3},{field: "related.ip", setter: fld_append}]},
+     "alias.ipv6": {convert: to_ip, to:[{field: "host.ip", setter: fld_prio, prio: 4},{field: "related.ip", setter: fld_append}]},
+     "alias.mac": {convert: to_mac, to:[{field: "host.mac", setter: fld_prio, prio: 1}]},
+     "application": {to:[{field: "network.application", setter: fld_set}]},
+     "bytes": {convert: to_long, to:[{field: "network.bytes", setter: fld_set}]},
+     "c_domain": {to:[{field: "source.domain", setter: fld_prio, prio: 1}]},
+     "c_logon_id": {to:[{field: "user.id", setter: fld_prio, prio: 2}]},
+     "c_user_name": {to:[{field: "related.user", setter: fld_append},{field: "user.name", setter: fld_prio, prio: 8}]},
+     "c_username": {to:[{field: "related.user", setter: fld_append},{field: "user.name", setter: fld_prio, prio: 2}]},
+     "cctld": {to:[{field: "url.top_level_domain", setter: fld_prio, prio: 1}]},
+     "child_pid": {convert: to_long, to:[{field: "process.pid", setter: fld_prio, prio: 1}]},
+     "child_pid_val": {to:[{field: "process.title", setter: fld_set}]},
+     "child_process": {to:[{field: "process.name", setter: fld_prio, prio: 1}]},
+     "city.dst": {to:[{field: "destination.geo.city_name", setter: fld_set}]},
+     "city.src": {to:[{field: "source.geo.city_name", setter: fld_set}]},
+     "daddr": {convert: to_ip, to:[{field: "destination.ip", setter: fld_set},{field: "related.ip", setter: fld_append}]},
+     "daddr_v6": {convert: to_ip, to:[{field: "destination.ip", setter: fld_set},{field: "related.ip", setter: fld_append}]},
+     "ddomain": {to:[{field: "destination.domain", setter: fld_prio, prio: 0}]},
+     "devicehostip": {convert: to_ip, to:[{field: "host.ip", setter: fld_prio, prio: 2},{field: "related.ip", setter: fld_append}]},
+     "devicehostmac": {convert: to_mac, to:[{field: "host.mac", setter: fld_prio, prio: 0}]},
+     "dhost": {to:[{field: "destination.address", setter: fld_set},{field: "related.hosts", setter: fld_append}]},
+     "dinterface": {to:[{field: "observer.egress.interface.name", setter: fld_set}]},
+     "direction": {to:[{field: "network.direction", setter: fld_set}]},
+     "directory": {to:[{field: "file.directory", setter:
fld_set}]}, + "dmacaddr": {convert: to_mac, to:[{field: "destination.mac", setter: fld_set}]}, + "dns.responsetype": {to:[{field: "dns.answers.type", setter: fld_set}]}, + "dns.resptext": {to:[{field: "dns.answers.name", setter: fld_set}]}, + "dns_querytype": {to:[{field: "dns.question.type", setter: fld_set}]}, + "domain": {to:[{field: "server.domain", setter: fld_prio, prio: 0},{field: "related.hosts", setter: fld_append}]}, + "domain.dst": {to:[{field: "destination.domain", setter: fld_prio, prio: 1}]}, + "domain.src": {to:[{field: "source.domain", setter: fld_prio, prio: 2}]}, + "domain_id": {to:[{field: "user.domain", setter: fld_set}]}, + "domainname": {to:[{field: "server.domain", setter: fld_prio, prio: 1}]}, + "dport": {convert: to_long, to:[{field: "destination.port", setter: fld_prio, prio: 0}]}, + "dtransaddr": {convert: to_ip, to:[{field: "destination.nat.ip", setter: fld_prio, prio: 0},{field: "related.ip", setter: fld_append}]}, + "dtransport": {convert: to_long, to:[{field: "destination.nat.port", setter: fld_prio, prio: 0}]}, + "ec_outcome": {to:[{field: "event.outcome", setter: fld_ecs_outcome}]}, + "event_description": {to:[{field: "message", setter: fld_prio, prio: 0}]}, + "event_source": {to:[{field: "related.hosts", setter: fld_append}]}, + "event_time": {convert: to_date, to:[{field: "@timestamp", setter: fld_set}]}, + "event_type": {to:[{field: "event.action", setter: fld_prio, prio: 1}]}, + "extension": {to:[{field: "file.extension", setter: fld_prio, prio: 1}]}, + "file.attributes": {to:[{field: "file.attributes", setter: fld_set}]}, + "filename": {to:[{field: "file.name", setter: fld_prio, prio: 0}]}, + "filename_size": {convert: to_long, to:[{field: "file.size", setter: fld_set}]}, + "filepath": {to:[{field: "file.path", setter: fld_set}]}, + "filetype": {to:[{field: "file.type", setter: fld_set}]}, + "fqdn": {to:[{field: "related.hosts", setter: fld_append}]}, + "group": {to:[{field: "group.name", setter: fld_set}]}, + "groupid": {to:[{field: "group.id", setter: fld_set}]}, + "host": {to:[{field: "host.name", setter: fld_prio, prio: 1},{field: "related.hosts", setter: fld_append}]}, + "hostip": {convert: to_ip, to:[{field: "host.ip", setter: fld_prio, prio: 0},{field: "related.ip", setter: fld_append}]}, + "hostip_v6": {convert: to_ip, to:[{field: "host.ip", setter: fld_prio, prio: 1},{field: "related.ip", setter: fld_append}]}, + "hostname": {to:[{field: "host.name", setter: fld_prio, prio: 0}]}, + "id": {to:[{field: "event.code", setter: fld_prio, prio: 0}]}, + "interface": {to:[{field: "network.interface.name", setter: fld_set}]}, + "ip.orig": {convert: to_ip, to:[{field: "network.forwarded_ip", setter: fld_prio, prio: 0},{field: "related.ip", setter: fld_append}]}, + "ip.trans.dst": {convert: to_ip, to:[{field: "destination.nat.ip", setter: fld_prio, prio: 1},{field: "related.ip", setter: fld_append}]}, + "ip.trans.src": {convert: to_ip, to:[{field: "source.nat.ip", setter: fld_prio, prio: 1},{field: "related.ip", setter: fld_append}]}, + "ipv6.orig": {convert: to_ip, to:[{field: "network.forwarded_ip", setter: fld_prio, prio: 2},{field: "related.ip", setter: fld_append}]}, + "latdec_dst": {convert: to_double, to:[{field: "destination.geo.location.lat", setter: fld_set}]}, + "latdec_src": {convert: to_double, to:[{field: "source.geo.location.lat", setter: fld_set}]}, + "location_city": {to:[{field: "geo.city_name", setter: fld_set}]}, + "location_country": {to:[{field: "geo.country_name", setter: fld_set}]}, + "location_desc": {to:[{field: "geo.name", 
setter: fld_set}]}, + "location_dst": {to:[{field: "destination.geo.country_name", setter: fld_set}]}, + "location_src": {to:[{field: "source.geo.country_name", setter: fld_set}]}, + "location_state": {to:[{field: "geo.region_name", setter: fld_set}]}, + "logon_id": {to:[{field: "related.user", setter: fld_append},{field: "user.name", setter: fld_prio, prio: 5}]}, + "longdec_dst": {convert: to_double, to:[{field: "destination.geo.location.lon", setter: fld_set}]}, + "longdec_src": {convert: to_double, to:[{field: "source.geo.location.lon", setter: fld_set}]}, + "macaddr": {convert: to_mac, to:[{field: "host.mac", setter: fld_prio, prio: 2}]}, + "messageid": {to:[{field: "event.code", setter: fld_prio, prio: 1}]}, + "method": {to:[{field: "http.request.method", setter: fld_set}]}, + "msg": {to:[{field: "message", setter: fld_set}]}, + "orig_ip": {convert: to_ip, to:[{field: "network.forwarded_ip", setter: fld_prio, prio: 1},{field: "related.ip", setter: fld_append}]}, + "owner": {to:[{field: "related.user", setter: fld_append},{field: "user.name", setter: fld_prio, prio: 6}]}, + "packets": {convert: to_long, to:[{field: "network.packets", setter: fld_set}]}, + "parent_pid": {convert: to_long, to:[{field: "process.parent.pid", setter: fld_prio, prio: 0}]}, + "parent_pid_val": {to:[{field: "process.parent.title", setter: fld_set}]}, + "parent_process": {to:[{field: "process.parent.name", setter: fld_prio, prio: 0}]}, + "patient_fullname": {to:[{field: "user.full_name", setter: fld_prio, prio: 1}]}, + "port.dst": {convert: to_long, to:[{field: "destination.port", setter: fld_prio, prio: 1}]}, + "port.src": {convert: to_long, to:[{field: "source.port", setter: fld_prio, prio: 1}]}, + "port.trans.dst": {convert: to_long, to:[{field: "destination.nat.port", setter: fld_prio, prio: 1}]}, + "port.trans.src": {convert: to_long, to:[{field: "source.nat.port", setter: fld_prio, prio: 1}]}, + "process": {to:[{field: "process.name", setter: fld_prio, prio: 0}]}, + "process_id": {convert: to_long, to:[{field: "process.pid", setter: fld_prio, prio: 0}]}, + "process_id_src": {convert: to_long, to:[{field: "process.parent.pid", setter: fld_prio, prio: 1}]}, + "process_src": {to:[{field: "process.parent.name", setter: fld_prio, prio: 1}]}, + "product": {to:[{field: "observer.product", setter: fld_set}]}, + "protocol": {to:[{field: "network.protocol", setter: fld_set}]}, + "query": {to:[{field: "url.query", setter: fld_prio, prio: 2}]}, + "rbytes": {convert: to_long, to:[{field: "destination.bytes", setter: fld_set}]}, + "referer": {to:[{field: "http.request.referrer", setter: fld_prio, prio: 1}]}, + "rulename": {to:[{field: "rule.name", setter: fld_set}]}, + "saddr": {convert: to_ip, to:[{field: "source.ip", setter: fld_set},{field: "related.ip", setter: fld_append}]}, + "saddr_v6": {convert: to_ip, to:[{field: "source.ip", setter: fld_set},{field: "related.ip", setter: fld_append}]}, + "sbytes": {convert: to_long, to:[{field: "source.bytes", setter: fld_set}]}, + "sdomain": {to:[{field: "source.domain", setter: fld_prio, prio: 0}]}, + "service": {to:[{field: "service.name", setter: fld_prio, prio: 1}]}, + "service.name": {to:[{field: "service.name", setter: fld_prio, prio: 0}]}, + "service_account": {to:[{field: "related.user", setter: fld_append},{field: "user.name", setter: fld_prio, prio: 7}]}, + "severity": {to:[{field: "log.level", setter: fld_set}]}, + "shost": {to:[{field: "host.hostname", setter: fld_set},{field: "source.address", setter: fld_set},{field: "related.hosts", setter: fld_append}]}, + 
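+ // Note (illustrative, not from the original file): every entry in this map
+ // follows the same schema. An optional "convert" coerces the captured
+ // string (to_long, to_ip, ...), and each "to" target is written with its
+ // setter: fld_set overwrites, fld_append collects multiple values, and
+ // fld_prio keeps the value coming from the lowest-numbered prio when
+ // several source keys feed one field.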
"sinterface": {to:[{field: "observer.ingress.interface.name", setter: fld_set}]}, + "sld": {to:[{field: "url.registered_domain", setter: fld_set}]}, + "smacaddr": {convert: to_mac, to:[{field: "source.mac", setter: fld_set}]}, + "sport": {convert: to_long, to:[{field: "source.port", setter: fld_prio, prio: 0}]}, + "stransaddr": {convert: to_ip, to:[{field: "source.nat.ip", setter: fld_prio, prio: 0},{field: "related.ip", setter: fld_append}]}, + "stransport": {convert: to_long, to:[{field: "source.nat.port", setter: fld_prio, prio: 0}]}, + "tcp.dstport": {convert: to_long, to:[{field: "destination.port", setter: fld_prio, prio: 2}]}, + "tcp.srcport": {convert: to_long, to:[{field: "source.port", setter: fld_prio, prio: 2}]}, + "timezone": {to:[{field: "event.timezone", setter: fld_set}]}, + "tld": {to:[{field: "url.top_level_domain", setter: fld_prio, prio: 0}]}, + "udp.dstport": {convert: to_long, to:[{field: "destination.port", setter: fld_prio, prio: 3}]}, + "udp.srcport": {convert: to_long, to:[{field: "source.port", setter: fld_prio, prio: 3}]}, + "uid": {to:[{field: "related.user", setter: fld_append},{field: "user.name", setter: fld_prio, prio: 3}]}, + "url": {to:[{field: "url.original", setter: fld_prio, prio: 1}]}, + "url_raw": {to:[{field: "url.original", setter: fld_prio, prio: 0}]}, + "urldomain": {to:[{field: "url.domain", setter: fld_prio, prio: 0}]}, + "urlquery": {to:[{field: "url.query", setter: fld_prio, prio: 0}]}, + "user": {to:[{field: "related.user", setter: fld_append},{field: "user.name", setter: fld_prio, prio: 0}]}, + "user.id": {to:[{field: "user.id", setter: fld_prio, prio: 1}]}, + "user_agent": {to:[{field: "user_agent.original", setter: fld_set}]}, + "user_fullname": {to:[{field: "user.full_name", setter: fld_prio, prio: 0}]}, + "user_id": {to:[{field: "user.id", setter: fld_prio, prio: 0}]}, + "username": {to:[{field: "related.user", setter: fld_append},{field: "user.name", setter: fld_prio, prio: 1}]}, + "version": {to:[{field: "observer.version", setter: fld_set}]}, + "web_domain": {to:[{field: "url.domain", setter: fld_prio, prio: 1},{field: "related.hosts", setter: fld_append}]}, + "web_extension": {to:[{field: "file.extension", setter: fld_prio, prio: 0}]}, + "web_query": {to:[{field: "url.query", setter: fld_prio, prio: 1}]}, + "web_ref_domain": {to:[{field: "related.hosts", setter: fld_append}]}, + "web_referer": {to:[{field: "http.request.referrer", setter: fld_prio, prio: 0}]}, + "web_root": {to:[{field: "url.path", setter: fld_set}]}, + "webpage": {to:[{field: "file.name", setter: fld_prio, prio: 1}]}, + }; + + var rsa_mappings = { + "access_point": {to:[{field: "rsa.wireless.access_point", setter: fld_set}]}, + "accesses": {to:[{field: "rsa.identity.accesses", setter: fld_set}]}, + "acl_id": {to:[{field: "rsa.misc.acl_id", setter: fld_set}]}, + "acl_op": {to:[{field: "rsa.misc.acl_op", setter: fld_set}]}, + "acl_pos": {to:[{field: "rsa.misc.acl_pos", setter: fld_set}]}, + "acl_table": {to:[{field: "rsa.misc.acl_table", setter: fld_set}]}, + "action": {to:[{field: "rsa.misc.action", setter: fld_append}]}, + "ad_computer_dst": {to:[{field: "rsa.network.ad_computer_dst", setter: fld_set}]}, + "addr": {to:[{field: "rsa.network.addr", setter: fld_set}]}, + "admin": {to:[{field: "rsa.misc.admin", setter: fld_set}]}, + "agent": {to:[{field: "rsa.misc.client", setter: fld_prio, prio: 0}]}, + "agent.id": {to:[{field: "rsa.misc.agent_id", setter: fld_set}]}, + "alarm_id": {to:[{field: "rsa.misc.alarm_id", setter: fld_set}]}, + "alarmname": {to:[{field: 
"rsa.misc.alarmname", setter: fld_set}]}, + "alert": {to:[{field: "rsa.threat.alert", setter: fld_set}]}, + "alert_id": {to:[{field: "rsa.misc.alert_id", setter: fld_set}]}, + "alias.host": {to:[{field: "rsa.network.alias_host", setter: fld_append}]}, + "analysis.file": {to:[{field: "rsa.investigations.analysis_file", setter: fld_set}]}, + "analysis.service": {to:[{field: "rsa.investigations.analysis_service", setter: fld_set}]}, + "analysis.session": {to:[{field: "rsa.investigations.analysis_session", setter: fld_set}]}, + "app_id": {to:[{field: "rsa.misc.app_id", setter: fld_set}]}, + "attachment": {to:[{field: "rsa.file.attachment", setter: fld_set}]}, + "audit": {to:[{field: "rsa.misc.audit", setter: fld_set}]}, + "audit_class": {to:[{field: "rsa.internal.audit_class", setter: fld_set}]}, + "audit_object": {to:[{field: "rsa.misc.audit_object", setter: fld_set}]}, + "auditdata": {to:[{field: "rsa.misc.auditdata", setter: fld_set}]}, + "authmethod": {to:[{field: "rsa.identity.auth_method", setter: fld_set}]}, + "autorun_type": {to:[{field: "rsa.misc.autorun_type", setter: fld_set}]}, + "bcc": {to:[{field: "rsa.email.email", setter: fld_append}]}, + "benchmark": {to:[{field: "rsa.misc.benchmark", setter: fld_set}]}, + "binary": {to:[{field: "rsa.file.binary", setter: fld_set}]}, + "boc": {to:[{field: "rsa.investigations.boc", setter: fld_set}]}, + "bssid": {to:[{field: "rsa.wireless.wlan_ssid", setter: fld_prio, prio: 1}]}, + "bypass": {to:[{field: "rsa.misc.bypass", setter: fld_set}]}, + "c_sid": {to:[{field: "rsa.identity.user_sid_src", setter: fld_set}]}, + "cache": {to:[{field: "rsa.misc.cache", setter: fld_set}]}, + "cache_hit": {to:[{field: "rsa.misc.cache_hit", setter: fld_set}]}, + "calling_from": {to:[{field: "rsa.misc.phone", setter: fld_prio, prio: 1}]}, + "calling_to": {to:[{field: "rsa.misc.phone", setter: fld_prio, prio: 0}]}, + "category": {to:[{field: "rsa.misc.category", setter: fld_set}]}, + "cc": {to:[{field: "rsa.email.email", setter: fld_append}]}, + "cc.number": {convert: to_long, to:[{field: "rsa.misc.cc_number", setter: fld_set}]}, + "cefversion": {to:[{field: "rsa.misc.cefversion", setter: fld_set}]}, + "cert.serial": {to:[{field: "rsa.crypto.cert_serial", setter: fld_set}]}, + "cert_ca": {to:[{field: "rsa.crypto.cert_ca", setter: fld_set}]}, + "cert_checksum": {to:[{field: "rsa.crypto.cert_checksum", setter: fld_set}]}, + "cert_common": {to:[{field: "rsa.crypto.cert_common", setter: fld_set}]}, + "cert_error": {to:[{field: "rsa.crypto.cert_error", setter: fld_set}]}, + "cert_hostname": {to:[{field: "rsa.crypto.cert_host_name", setter: fld_set}]}, + "cert_hostname_cat": {to:[{field: "rsa.crypto.cert_host_cat", setter: fld_set}]}, + "cert_issuer": {to:[{field: "rsa.crypto.cert_issuer", setter: fld_set}]}, + "cert_keysize": {to:[{field: "rsa.crypto.cert_keysize", setter: fld_set}]}, + "cert_status": {to:[{field: "rsa.crypto.cert_status", setter: fld_set}]}, + "cert_subject": {to:[{field: "rsa.crypto.cert_subject", setter: fld_set}]}, + "cert_username": {to:[{field: "rsa.crypto.cert_username", setter: fld_set}]}, + "cfg.attr": {to:[{field: "rsa.misc.cfg_attr", setter: fld_set}]}, + "cfg.obj": {to:[{field: "rsa.misc.cfg_obj", setter: fld_set}]}, + "cfg.path": {to:[{field: "rsa.misc.cfg_path", setter: fld_set}]}, + "change_attribute": {to:[{field: "rsa.misc.change_attrib", setter: fld_set}]}, + "change_new": {to:[{field: "rsa.misc.change_new", setter: fld_set}]}, + "change_old": {to:[{field: "rsa.misc.change_old", setter: fld_set}]}, + "changes": {to:[{field: 
"rsa.misc.changes", setter: fld_set}]}, + "checksum": {to:[{field: "rsa.misc.checksum", setter: fld_set}]}, + "checksum.dst": {to:[{field: "rsa.misc.checksum_dst", setter: fld_set}]}, + "checksum.src": {to:[{field: "rsa.misc.checksum_src", setter: fld_set}]}, + "cid": {to:[{field: "rsa.internal.cid", setter: fld_set}]}, + "client": {to:[{field: "rsa.misc.client", setter: fld_prio, prio: 1}]}, + "client_ip": {to:[{field: "rsa.misc.client_ip", setter: fld_set}]}, + "clustermembers": {to:[{field: "rsa.misc.clustermembers", setter: fld_set}]}, + "cmd": {to:[{field: "rsa.misc.cmd", setter: fld_set}]}, + "cn_acttimeout": {to:[{field: "rsa.misc.cn_acttimeout", setter: fld_set}]}, + "cn_asn_dst": {to:[{field: "rsa.web.cn_asn_dst", setter: fld_set}]}, + "cn_asn_src": {to:[{field: "rsa.misc.cn_asn_src", setter: fld_set}]}, + "cn_bgpv4nxthop": {to:[{field: "rsa.misc.cn_bgpv4nxthop", setter: fld_set}]}, + "cn_ctr_dst_code": {to:[{field: "rsa.misc.cn_ctr_dst_code", setter: fld_set}]}, + "cn_dst_tos": {to:[{field: "rsa.misc.cn_dst_tos", setter: fld_set}]}, + "cn_dst_vlan": {to:[{field: "rsa.misc.cn_dst_vlan", setter: fld_set}]}, + "cn_engine_id": {to:[{field: "rsa.misc.cn_engine_id", setter: fld_set}]}, + "cn_engine_type": {to:[{field: "rsa.misc.cn_engine_type", setter: fld_set}]}, + "cn_f_switch": {to:[{field: "rsa.misc.cn_f_switch", setter: fld_set}]}, + "cn_flowsampid": {to:[{field: "rsa.misc.cn_flowsampid", setter: fld_set}]}, + "cn_flowsampintv": {to:[{field: "rsa.misc.cn_flowsampintv", setter: fld_set}]}, + "cn_flowsampmode": {to:[{field: "rsa.misc.cn_flowsampmode", setter: fld_set}]}, + "cn_inacttimeout": {to:[{field: "rsa.misc.cn_inacttimeout", setter: fld_set}]}, + "cn_inpermbyts": {to:[{field: "rsa.misc.cn_inpermbyts", setter: fld_set}]}, + "cn_inpermpckts": {to:[{field: "rsa.misc.cn_inpermpckts", setter: fld_set}]}, + "cn_invalid": {to:[{field: "rsa.misc.cn_invalid", setter: fld_set}]}, + "cn_ip_proto_ver": {to:[{field: "rsa.misc.cn_ip_proto_ver", setter: fld_set}]}, + "cn_ipv4_ident": {to:[{field: "rsa.misc.cn_ipv4_ident", setter: fld_set}]}, + "cn_l_switch": {to:[{field: "rsa.misc.cn_l_switch", setter: fld_set}]}, + "cn_log_did": {to:[{field: "rsa.misc.cn_log_did", setter: fld_set}]}, + "cn_log_rid": {to:[{field: "rsa.misc.cn_log_rid", setter: fld_set}]}, + "cn_max_ttl": {to:[{field: "rsa.misc.cn_max_ttl", setter: fld_set}]}, + "cn_maxpcktlen": {to:[{field: "rsa.misc.cn_maxpcktlen", setter: fld_set}]}, + "cn_min_ttl": {to:[{field: "rsa.misc.cn_min_ttl", setter: fld_set}]}, + "cn_minpcktlen": {to:[{field: "rsa.misc.cn_minpcktlen", setter: fld_set}]}, + "cn_mpls_lbl_1": {to:[{field: "rsa.misc.cn_mpls_lbl_1", setter: fld_set}]}, + "cn_mpls_lbl_10": {to:[{field: "rsa.misc.cn_mpls_lbl_10", setter: fld_set}]}, + "cn_mpls_lbl_2": {to:[{field: "rsa.misc.cn_mpls_lbl_2", setter: fld_set}]}, + "cn_mpls_lbl_3": {to:[{field: "rsa.misc.cn_mpls_lbl_3", setter: fld_set}]}, + "cn_mpls_lbl_4": {to:[{field: "rsa.misc.cn_mpls_lbl_4", setter: fld_set}]}, + "cn_mpls_lbl_5": {to:[{field: "rsa.misc.cn_mpls_lbl_5", setter: fld_set}]}, + "cn_mpls_lbl_6": {to:[{field: "rsa.misc.cn_mpls_lbl_6", setter: fld_set}]}, + "cn_mpls_lbl_7": {to:[{field: "rsa.misc.cn_mpls_lbl_7", setter: fld_set}]}, + "cn_mpls_lbl_8": {to:[{field: "rsa.misc.cn_mpls_lbl_8", setter: fld_set}]}, + "cn_mpls_lbl_9": {to:[{field: "rsa.misc.cn_mpls_lbl_9", setter: fld_set}]}, + "cn_mplstoplabel": {to:[{field: "rsa.misc.cn_mplstoplabel", setter: fld_set}]}, + "cn_mplstoplabip": {to:[{field: "rsa.misc.cn_mplstoplabip", setter: fld_set}]}, + 
"cn_mul_dst_byt": {to:[{field: "rsa.misc.cn_mul_dst_byt", setter: fld_set}]}, + "cn_mul_dst_pks": {to:[{field: "rsa.misc.cn_mul_dst_pks", setter: fld_set}]}, + "cn_muligmptype": {to:[{field: "rsa.misc.cn_muligmptype", setter: fld_set}]}, + "cn_rpackets": {to:[{field: "rsa.web.cn_rpackets", setter: fld_set}]}, + "cn_sampalgo": {to:[{field: "rsa.misc.cn_sampalgo", setter: fld_set}]}, + "cn_sampint": {to:[{field: "rsa.misc.cn_sampint", setter: fld_set}]}, + "cn_seqctr": {to:[{field: "rsa.misc.cn_seqctr", setter: fld_set}]}, + "cn_spackets": {to:[{field: "rsa.misc.cn_spackets", setter: fld_set}]}, + "cn_src_tos": {to:[{field: "rsa.misc.cn_src_tos", setter: fld_set}]}, + "cn_src_vlan": {to:[{field: "rsa.misc.cn_src_vlan", setter: fld_set}]}, + "cn_sysuptime": {to:[{field: "rsa.misc.cn_sysuptime", setter: fld_set}]}, + "cn_template_id": {to:[{field: "rsa.misc.cn_template_id", setter: fld_set}]}, + "cn_totbytsexp": {to:[{field: "rsa.misc.cn_totbytsexp", setter: fld_set}]}, + "cn_totflowexp": {to:[{field: "rsa.misc.cn_totflowexp", setter: fld_set}]}, + "cn_totpcktsexp": {to:[{field: "rsa.misc.cn_totpcktsexp", setter: fld_set}]}, + "cn_unixnanosecs": {to:[{field: "rsa.misc.cn_unixnanosecs", setter: fld_set}]}, + "cn_v6flowlabel": {to:[{field: "rsa.misc.cn_v6flowlabel", setter: fld_set}]}, + "cn_v6optheaders": {to:[{field: "rsa.misc.cn_v6optheaders", setter: fld_set}]}, + "code": {to:[{field: "rsa.misc.code", setter: fld_set}]}, + "command": {to:[{field: "rsa.misc.command", setter: fld_set}]}, + "comments": {to:[{field: "rsa.misc.comments", setter: fld_set}]}, + "comp_class": {to:[{field: "rsa.misc.comp_class", setter: fld_set}]}, + "comp_name": {to:[{field: "rsa.misc.comp_name", setter: fld_set}]}, + "comp_rbytes": {to:[{field: "rsa.misc.comp_rbytes", setter: fld_set}]}, + "comp_sbytes": {to:[{field: "rsa.misc.comp_sbytes", setter: fld_set}]}, + "component_version": {to:[{field: "rsa.misc.comp_version", setter: fld_set}]}, + "connection_id": {to:[{field: "rsa.misc.connection_id", setter: fld_prio, prio: 1}]}, + "connectionid": {to:[{field: "rsa.misc.connection_id", setter: fld_prio, prio: 0}]}, + "content": {to:[{field: "rsa.misc.content", setter: fld_set}]}, + "content_type": {to:[{field: "rsa.misc.content_type", setter: fld_set}]}, + "content_version": {to:[{field: "rsa.misc.content_version", setter: fld_set}]}, + "context": {to:[{field: "rsa.misc.context", setter: fld_set}]}, + "count": {to:[{field: "rsa.misc.count", setter: fld_set}]}, + "cpu": {convert: to_long, to:[{field: "rsa.misc.cpu", setter: fld_set}]}, + "cpu_data": {to:[{field: "rsa.misc.cpu_data", setter: fld_set}]}, + "criticality": {to:[{field: "rsa.misc.criticality", setter: fld_set}]}, + "cs_agency_dst": {to:[{field: "rsa.misc.cs_agency_dst", setter: fld_set}]}, + "cs_analyzedby": {to:[{field: "rsa.misc.cs_analyzedby", setter: fld_set}]}, + "cs_av_other": {to:[{field: "rsa.misc.cs_av_other", setter: fld_set}]}, + "cs_av_primary": {to:[{field: "rsa.misc.cs_av_primary", setter: fld_set}]}, + "cs_av_secondary": {to:[{field: "rsa.misc.cs_av_secondary", setter: fld_set}]}, + "cs_bgpv6nxthop": {to:[{field: "rsa.misc.cs_bgpv6nxthop", setter: fld_set}]}, + "cs_bit9status": {to:[{field: "rsa.misc.cs_bit9status", setter: fld_set}]}, + "cs_context": {to:[{field: "rsa.misc.cs_context", setter: fld_set}]}, + "cs_control": {to:[{field: "rsa.misc.cs_control", setter: fld_set}]}, + "cs_data": {to:[{field: "rsa.misc.cs_data", setter: fld_set}]}, + "cs_datecret": {to:[{field: "rsa.misc.cs_datecret", setter: fld_set}]}, + "cs_dst_tld": {to:[{field: 
"rsa.misc.cs_dst_tld", setter: fld_set}]}, + "cs_eth_dst_ven": {to:[{field: "rsa.misc.cs_eth_dst_ven", setter: fld_set}]}, + "cs_eth_src_ven": {to:[{field: "rsa.misc.cs_eth_src_ven", setter: fld_set}]}, + "cs_event_uuid": {to:[{field: "rsa.misc.cs_event_uuid", setter: fld_set}]}, + "cs_filetype": {to:[{field: "rsa.misc.cs_filetype", setter: fld_set}]}, + "cs_fld": {to:[{field: "rsa.misc.cs_fld", setter: fld_set}]}, + "cs_if_desc": {to:[{field: "rsa.misc.cs_if_desc", setter: fld_set}]}, + "cs_if_name": {to:[{field: "rsa.misc.cs_if_name", setter: fld_set}]}, + "cs_ip_next_hop": {to:[{field: "rsa.misc.cs_ip_next_hop", setter: fld_set}]}, + "cs_ipv4dstpre": {to:[{field: "rsa.misc.cs_ipv4dstpre", setter: fld_set}]}, + "cs_ipv4srcpre": {to:[{field: "rsa.misc.cs_ipv4srcpre", setter: fld_set}]}, + "cs_lifetime": {to:[{field: "rsa.misc.cs_lifetime", setter: fld_set}]}, + "cs_log_medium": {to:[{field: "rsa.misc.cs_log_medium", setter: fld_set}]}, + "cs_loginname": {to:[{field: "rsa.misc.cs_loginname", setter: fld_set}]}, + "cs_modulescore": {to:[{field: "rsa.misc.cs_modulescore", setter: fld_set}]}, + "cs_modulesign": {to:[{field: "rsa.misc.cs_modulesign", setter: fld_set}]}, + "cs_opswatresult": {to:[{field: "rsa.misc.cs_opswatresult", setter: fld_set}]}, + "cs_payload": {to:[{field: "rsa.misc.cs_payload", setter: fld_set}]}, + "cs_registrant": {to:[{field: "rsa.misc.cs_registrant", setter: fld_set}]}, + "cs_registrar": {to:[{field: "rsa.misc.cs_registrar", setter: fld_set}]}, + "cs_represult": {to:[{field: "rsa.misc.cs_represult", setter: fld_set}]}, + "cs_rpayload": {to:[{field: "rsa.misc.cs_rpayload", setter: fld_set}]}, + "cs_sampler_name": {to:[{field: "rsa.misc.cs_sampler_name", setter: fld_set}]}, + "cs_sourcemodule": {to:[{field: "rsa.misc.cs_sourcemodule", setter: fld_set}]}, + "cs_streams": {to:[{field: "rsa.misc.cs_streams", setter: fld_set}]}, + "cs_targetmodule": {to:[{field: "rsa.misc.cs_targetmodule", setter: fld_set}]}, + "cs_v6nxthop": {to:[{field: "rsa.misc.cs_v6nxthop", setter: fld_set}]}, + "cs_whois_server": {to:[{field: "rsa.misc.cs_whois_server", setter: fld_set}]}, + "cs_yararesult": {to:[{field: "rsa.misc.cs_yararesult", setter: fld_set}]}, + "cve": {to:[{field: "rsa.misc.cve", setter: fld_set}]}, + "d_certauth": {to:[{field: "rsa.crypto.d_certauth", setter: fld_set}]}, + "d_cipher": {to:[{field: "rsa.crypto.cipher_dst", setter: fld_set}]}, + "d_ciphersize": {convert: to_long, to:[{field: "rsa.crypto.cipher_size_dst", setter: fld_set}]}, + "d_sslver": {to:[{field: "rsa.crypto.ssl_ver_dst", setter: fld_set}]}, + "data": {to:[{field: "rsa.internal.data", setter: fld_set}]}, + "data_type": {to:[{field: "rsa.misc.data_type", setter: fld_set}]}, + "date": {to:[{field: "rsa.time.date", setter: fld_set}]}, + "datetime": {to:[{field: "rsa.time.datetime", setter: fld_set}]}, + "day": {to:[{field: "rsa.time.day", setter: fld_set}]}, + "db_id": {to:[{field: "rsa.db.db_id", setter: fld_set}]}, + "db_name": {to:[{field: "rsa.db.database", setter: fld_set}]}, + "db_pid": {convert: to_long, to:[{field: "rsa.db.db_pid", setter: fld_set}]}, + "dclass_counter1": {convert: to_long, to:[{field: "rsa.counters.dclass_c1", setter: fld_set}]}, + "dclass_counter1_string": {to:[{field: "rsa.counters.dclass_c1_str", setter: fld_set}]}, + "dclass_counter2": {convert: to_long, to:[{field: "rsa.counters.dclass_c2", setter: fld_set}]}, + "dclass_counter2_string": {to:[{field: "rsa.counters.dclass_c2_str", setter: fld_set}]}, + "dclass_counter3": {convert: to_long, to:[{field: "rsa.counters.dclass_c3", 
setter: fld_set}]}, + "dclass_counter3_string": {to:[{field: "rsa.counters.dclass_c3_str", setter: fld_set}]}, + "dclass_ratio1": {to:[{field: "rsa.counters.dclass_r1", setter: fld_set}]}, + "dclass_ratio1_string": {to:[{field: "rsa.counters.dclass_r1_str", setter: fld_set}]}, + "dclass_ratio2": {to:[{field: "rsa.counters.dclass_r2", setter: fld_set}]}, + "dclass_ratio2_string": {to:[{field: "rsa.counters.dclass_r2_str", setter: fld_set}]}, + "dclass_ratio3": {to:[{field: "rsa.counters.dclass_r3", setter: fld_set}]}, + "dclass_ratio3_string": {to:[{field: "rsa.counters.dclass_r3_str", setter: fld_set}]}, + "dead": {convert: to_long, to:[{field: "rsa.internal.dead", setter: fld_set}]}, + "description": {to:[{field: "rsa.misc.description", setter: fld_set}]}, + "detail": {to:[{field: "rsa.misc.event_desc", setter: fld_set}]}, + "device": {to:[{field: "rsa.misc.device_name", setter: fld_set}]}, + "device.class": {to:[{field: "rsa.internal.device_class", setter: fld_set}]}, + "device.group": {to:[{field: "rsa.internal.device_group", setter: fld_set}]}, + "device.host": {to:[{field: "rsa.internal.device_host", setter: fld_set}]}, + "device.ip": {convert: to_ip, to:[{field: "rsa.internal.device_ip", setter: fld_set}]}, + "device.ipv6": {convert: to_ip, to:[{field: "rsa.internal.device_ipv6", setter: fld_set}]}, + "device.type": {to:[{field: "rsa.internal.device_type", setter: fld_set}]}, + "device.type.id": {convert: to_long, to:[{field: "rsa.internal.device_type_id", setter: fld_set}]}, + "devicehostname": {to:[{field: "rsa.network.alias_host", setter: fld_append}]}, + "devvendor": {to:[{field: "rsa.misc.devvendor", setter: fld_set}]}, + "dhost": {to:[{field: "rsa.network.host_dst", setter: fld_set}]}, + "did": {to:[{field: "rsa.internal.did", setter: fld_set}]}, + "dinterface": {to:[{field: "rsa.network.dinterface", setter: fld_set}]}, + "directory.dst": {to:[{field: "rsa.file.directory_dst", setter: fld_set}]}, + "directory.src": {to:[{field: "rsa.file.directory_src", setter: fld_set}]}, + "disk_volume": {to:[{field: "rsa.storage.disk_volume", setter: fld_set}]}, + "disposition": {to:[{field: "rsa.misc.disposition", setter: fld_set}]}, + "distance": {to:[{field: "rsa.misc.distance", setter: fld_set}]}, + "dmask": {to:[{field: "rsa.network.dmask", setter: fld_set}]}, + "dn": {to:[{field: "rsa.identity.dn", setter: fld_set}]}, + "dns_a_record": {to:[{field: "rsa.network.dns_a_record", setter: fld_set}]}, + "dns_cname_record": {to:[{field: "rsa.network.dns_cname_record", setter: fld_set}]}, + "dns_id": {to:[{field: "rsa.network.dns_id", setter: fld_set}]}, + "dns_opcode": {to:[{field: "rsa.network.dns_opcode", setter: fld_set}]}, + "dns_ptr_record": {to:[{field: "rsa.network.dns_ptr_record", setter: fld_set}]}, + "dns_resp": {to:[{field: "rsa.network.dns_resp", setter: fld_set}]}, + "dns_type": {to:[{field: "rsa.network.dns_type", setter: fld_set}]}, + "doc_number": {convert: to_long, to:[{field: "rsa.misc.doc_number", setter: fld_set}]}, + "domain": {to:[{field: "rsa.network.domain", setter: fld_set}]}, + "domain1": {to:[{field: "rsa.network.domain1", setter: fld_set}]}, + "dst_dn": {to:[{field: "rsa.identity.dn_dst", setter: fld_set}]}, + "dst_payload": {to:[{field: "rsa.misc.payload_dst", setter: fld_set}]}, + "dst_spi": {to:[{field: "rsa.misc.spi_dst", setter: fld_set}]}, + "dst_zone": {to:[{field: "rsa.network.zone_dst", setter: fld_set}]}, + "dstburb": {to:[{field: "rsa.misc.dstburb", setter: fld_set}]}, + "duration": {convert: to_double, to:[{field: "rsa.time.duration_time", setter: 
fld_set}]}, + "duration_string": {to:[{field: "rsa.time.duration_str", setter: fld_set}]}, + "ec_activity": {to:[{field: "rsa.investigations.ec_activity", setter: fld_set}]}, + "ec_outcome": {to:[{field: "rsa.investigations.ec_outcome", setter: fld_set}]}, + "ec_subject": {to:[{field: "rsa.investigations.ec_subject", setter: fld_set}]}, + "ec_theme": {to:[{field: "rsa.investigations.ec_theme", setter: fld_set}]}, + "edomain": {to:[{field: "rsa.misc.edomain", setter: fld_set}]}, + "edomaub": {to:[{field: "rsa.misc.edomaub", setter: fld_set}]}, + "effective_time": {convert: to_date, to:[{field: "rsa.time.effective_time", setter: fld_set}]}, + "ein.number": {convert: to_long, to:[{field: "rsa.misc.ein_number", setter: fld_set}]}, + "email": {to:[{field: "rsa.email.email", setter: fld_append}]}, + "encryption_type": {to:[{field: "rsa.crypto.crypto", setter: fld_set}]}, + "endtime": {convert: to_date, to:[{field: "rsa.time.endtime", setter: fld_set}]}, + "entropy.req": {convert: to_long, to:[{field: "rsa.internal.entropy_req", setter: fld_set}]}, + "entropy.res": {convert: to_long, to:[{field: "rsa.internal.entropy_res", setter: fld_set}]}, + "entry": {to:[{field: "rsa.internal.entry", setter: fld_set}]}, + "eoc": {to:[{field: "rsa.investigations.eoc", setter: fld_set}]}, + "error": {to:[{field: "rsa.misc.error", setter: fld_set}]}, + "eth_type": {convert: to_long, to:[{field: "rsa.network.eth_type", setter: fld_set}]}, + "euid": {to:[{field: "rsa.misc.euid", setter: fld_set}]}, + "event.cat": {convert: to_long, to:[{field: "rsa.investigations.event_cat", setter: fld_prio, prio: 1}]}, + "event.cat.name": {to:[{field: "rsa.investigations.event_cat_name", setter: fld_prio, prio: 1}]}, + "event_cat": {convert: to_long, to:[{field: "rsa.investigations.event_cat", setter: fld_prio, prio: 0}]}, + "event_cat_name": {to:[{field: "rsa.investigations.event_cat_name", setter: fld_prio, prio: 0}]}, + "event_category": {to:[{field: "rsa.misc.event_category", setter: fld_set}]}, + "event_computer": {to:[{field: "rsa.misc.event_computer", setter: fld_set}]}, + "event_counter": {convert: to_long, to:[{field: "rsa.counters.event_counter", setter: fld_set}]}, + "event_description": {to:[{field: "rsa.internal.event_desc", setter: fld_set}]}, + "event_id": {to:[{field: "rsa.misc.event_id", setter: fld_set}]}, + "event_log": {to:[{field: "rsa.misc.event_log", setter: fld_set}]}, + "event_name": {to:[{field: "rsa.internal.event_name", setter: fld_set}]}, + "event_queue_time": {convert: to_date, to:[{field: "rsa.time.event_queue_time", setter: fld_set}]}, + "event_source": {to:[{field: "rsa.misc.event_source", setter: fld_set}]}, + "event_state": {to:[{field: "rsa.misc.event_state", setter: fld_set}]}, + "event_time": {convert: to_date, to:[{field: "rsa.time.event_time", setter: fld_set}]}, + "event_time_str": {to:[{field: "rsa.time.event_time_str", setter: fld_prio, prio: 1}]}, + "event_time_string": {to:[{field: "rsa.time.event_time_str", setter: fld_prio, prio: 0}]}, + "event_type": {to:[{field: "rsa.misc.event_type", setter: fld_set}]}, + "event_user": {to:[{field: "rsa.misc.event_user", setter: fld_set}]}, + "eventtime": {to:[{field: "rsa.time.eventtime", setter: fld_set}]}, + "expected_val": {to:[{field: "rsa.misc.expected_val", setter: fld_set}]}, + "expiration_time": {convert: to_date, to:[{field: "rsa.time.expire_time", setter: fld_set}]}, + "expiration_time_string": {to:[{field: "rsa.time.expire_time_str", setter: fld_set}]}, + "facility": {to:[{field: "rsa.misc.facility", setter: fld_set}]}, + 
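+ // Note (illustrative, not from the original file): "event_time_str" and
+ // "event_time_string" above both target rsa.time.event_time_str; when one
+ // message supplies both source keys, the prio 0 entry
+ // ("event_time_string") wins.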
"facilityname": {to:[{field: "rsa.misc.facilityname", setter: fld_set}]}, + "faddr": {to:[{field: "rsa.network.faddr", setter: fld_set}]}, + "fcatnum": {to:[{field: "rsa.misc.fcatnum", setter: fld_set}]}, + "federated_idp": {to:[{field: "rsa.identity.federated_idp", setter: fld_set}]}, + "federated_sp": {to:[{field: "rsa.identity.federated_sp", setter: fld_set}]}, + "feed.category": {to:[{field: "rsa.internal.feed_category", setter: fld_set}]}, + "feed_desc": {to:[{field: "rsa.internal.feed_desc", setter: fld_set}]}, + "feed_name": {to:[{field: "rsa.internal.feed_name", setter: fld_set}]}, + "fhost": {to:[{field: "rsa.network.fhost", setter: fld_set}]}, + "file_entropy": {convert: to_double, to:[{field: "rsa.file.file_entropy", setter: fld_set}]}, + "file_vendor": {to:[{field: "rsa.file.file_vendor", setter: fld_set}]}, + "filename_dst": {to:[{field: "rsa.file.filename_dst", setter: fld_set}]}, + "filename_src": {to:[{field: "rsa.file.filename_src", setter: fld_set}]}, + "filename_tmp": {to:[{field: "rsa.file.filename_tmp", setter: fld_set}]}, + "filesystem": {to:[{field: "rsa.file.filesystem", setter: fld_set}]}, + "filter": {to:[{field: "rsa.misc.filter", setter: fld_set}]}, + "finterface": {to:[{field: "rsa.misc.finterface", setter: fld_set}]}, + "flags": {to:[{field: "rsa.misc.flags", setter: fld_set}]}, + "forensic_info": {to:[{field: "rsa.misc.forensic_info", setter: fld_set}]}, + "forward.ip": {convert: to_ip, to:[{field: "rsa.internal.forward_ip", setter: fld_set}]}, + "forward.ipv6": {convert: to_ip, to:[{field: "rsa.internal.forward_ipv6", setter: fld_set}]}, + "found": {to:[{field: "rsa.misc.found", setter: fld_set}]}, + "fport": {to:[{field: "rsa.network.fport", setter: fld_set}]}, + "fqdn": {to:[{field: "rsa.web.fqdn", setter: fld_set}]}, + "fresult": {convert: to_long, to:[{field: "rsa.misc.fresult", setter: fld_set}]}, + "from": {to:[{field: "rsa.email.email_src", setter: fld_set}]}, + "gaddr": {to:[{field: "rsa.misc.gaddr", setter: fld_set}]}, + "gateway": {to:[{field: "rsa.network.gateway", setter: fld_set}]}, + "gmtdate": {to:[{field: "rsa.time.gmtdate", setter: fld_set}]}, + "gmttime": {to:[{field: "rsa.time.gmttime", setter: fld_set}]}, + "group": {to:[{field: "rsa.misc.group", setter: fld_set}]}, + "group_object": {to:[{field: "rsa.misc.group_object", setter: fld_set}]}, + "groupid": {to:[{field: "rsa.misc.group_id", setter: fld_set}]}, + "h_code": {to:[{field: "rsa.internal.hcode", setter: fld_set}]}, + "hardware_id": {to:[{field: "rsa.misc.hardware_id", setter: fld_set}]}, + "header.id": {to:[{field: "rsa.internal.header_id", setter: fld_set}]}, + "host.orig": {to:[{field: "rsa.network.host_orig", setter: fld_set}]}, + "host.state": {to:[{field: "rsa.endpoint.host_state", setter: fld_set}]}, + "host.type": {to:[{field: "rsa.network.host_type", setter: fld_set}]}, + "host_role": {to:[{field: "rsa.identity.host_role", setter: fld_set}]}, + "hostid": {to:[{field: "rsa.network.alias_host", setter: fld_append}]}, + "hostname": {to:[{field: "rsa.network.alias_host", setter: fld_append}]}, + "hour": {to:[{field: "rsa.time.hour", setter: fld_set}]}, + "https.insact": {to:[{field: "rsa.crypto.https_insact", setter: fld_set}]}, + "https.valid": {to:[{field: "rsa.crypto.https_valid", setter: fld_set}]}, + "icmpcode": {convert: to_long, to:[{field: "rsa.network.icmp_code", setter: fld_set}]}, + "icmptype": {convert: to_long, to:[{field: "rsa.network.icmp_type", setter: fld_set}]}, + "id": {to:[{field: "rsa.misc.reference_id", setter: fld_set}]}, + "id1": {to:[{field: 
"rsa.misc.reference_id1", setter: fld_set}]}, + "id2": {to:[{field: "rsa.misc.reference_id2", setter: fld_set}]}, + "id3": {to:[{field: "rsa.misc.id3", setter: fld_set}]}, + "ike": {to:[{field: "rsa.crypto.ike", setter: fld_set}]}, + "ike_cookie1": {to:[{field: "rsa.crypto.ike_cookie1", setter: fld_set}]}, + "ike_cookie2": {to:[{field: "rsa.crypto.ike_cookie2", setter: fld_set}]}, + "im_buddyid": {to:[{field: "rsa.misc.im_buddyid", setter: fld_set}]}, + "im_buddyname": {to:[{field: "rsa.misc.im_buddyname", setter: fld_set}]}, + "im_client": {to:[{field: "rsa.misc.im_client", setter: fld_set}]}, + "im_croomid": {to:[{field: "rsa.misc.im_croomid", setter: fld_set}]}, + "im_croomtype": {to:[{field: "rsa.misc.im_croomtype", setter: fld_set}]}, + "im_members": {to:[{field: "rsa.misc.im_members", setter: fld_set}]}, + "im_userid": {to:[{field: "rsa.misc.im_userid", setter: fld_set}]}, + "im_username": {to:[{field: "rsa.misc.im_username", setter: fld_set}]}, + "index": {to:[{field: "rsa.misc.index", setter: fld_set}]}, + "info": {to:[{field: "rsa.db.index", setter: fld_set}]}, + "inode": {convert: to_long, to:[{field: "rsa.internal.inode", setter: fld_set}]}, + "inout": {to:[{field: "rsa.misc.inout", setter: fld_set}]}, + "instance": {to:[{field: "rsa.db.instance", setter: fld_set}]}, + "interface": {to:[{field: "rsa.network.interface", setter: fld_set}]}, + "inv.category": {to:[{field: "rsa.investigations.inv_category", setter: fld_set}]}, + "inv.context": {to:[{field: "rsa.investigations.inv_context", setter: fld_set}]}, + "ioc": {to:[{field: "rsa.investigations.ioc", setter: fld_set}]}, + "ip_proto": {convert: to_long, to:[{field: "rsa.network.ip_proto", setter: fld_set}]}, + "ipkt": {to:[{field: "rsa.misc.ipkt", setter: fld_set}]}, + "ipscat": {to:[{field: "rsa.misc.ipscat", setter: fld_set}]}, + "ipspri": {to:[{field: "rsa.misc.ipspri", setter: fld_set}]}, + "jobname": {to:[{field: "rsa.misc.jobname", setter: fld_set}]}, + "jobnum": {to:[{field: "rsa.misc.job_num", setter: fld_set}]}, + "laddr": {to:[{field: "rsa.network.laddr", setter: fld_set}]}, + "language": {to:[{field: "rsa.misc.language", setter: fld_set}]}, + "latitude": {to:[{field: "rsa.misc.latitude", setter: fld_set}]}, + "lc.cid": {to:[{field: "rsa.internal.lc_cid", setter: fld_set}]}, + "lc.ctime": {convert: to_date, to:[{field: "rsa.internal.lc_ctime", setter: fld_set}]}, + "ldap": {to:[{field: "rsa.identity.ldap", setter: fld_set}]}, + "ldap.query": {to:[{field: "rsa.identity.ldap_query", setter: fld_set}]}, + "ldap.response": {to:[{field: "rsa.identity.ldap_response", setter: fld_set}]}, + "level": {convert: to_long, to:[{field: "rsa.internal.level", setter: fld_set}]}, + "lhost": {to:[{field: "rsa.network.lhost", setter: fld_set}]}, + "library": {to:[{field: "rsa.misc.library", setter: fld_set}]}, + "lifetime": {convert: to_long, to:[{field: "rsa.misc.lifetime", setter: fld_set}]}, + "linenum": {to:[{field: "rsa.misc.linenum", setter: fld_set}]}, + "link": {to:[{field: "rsa.misc.link", setter: fld_set}]}, + "linterface": {to:[{field: "rsa.network.linterface", setter: fld_set}]}, + "list_name": {to:[{field: "rsa.misc.list_name", setter: fld_set}]}, + "listnum": {to:[{field: "rsa.misc.listnum", setter: fld_set}]}, + "load_data": {to:[{field: "rsa.misc.load_data", setter: fld_set}]}, + "location_floor": {to:[{field: "rsa.misc.location_floor", setter: fld_set}]}, + "location_mark": {to:[{field: "rsa.misc.location_mark", setter: fld_set}]}, + "log_id": {to:[{field: "rsa.misc.log_id", setter: fld_set}]}, + "log_type": 
{to:[{field: "rsa.misc.log_type", setter: fld_set}]}, + "logid": {to:[{field: "rsa.misc.logid", setter: fld_set}]}, + "logip": {to:[{field: "rsa.misc.logip", setter: fld_set}]}, + "logname": {to:[{field: "rsa.misc.logname", setter: fld_set}]}, + "logon_type": {to:[{field: "rsa.identity.logon_type", setter: fld_set}]}, + "logon_type_desc": {to:[{field: "rsa.identity.logon_type_desc", setter: fld_set}]}, + "longitude": {to:[{field: "rsa.misc.longitude", setter: fld_set}]}, + "lport": {to:[{field: "rsa.misc.lport", setter: fld_set}]}, + "lread": {convert: to_long, to:[{field: "rsa.db.lread", setter: fld_set}]}, + "lun": {to:[{field: "rsa.storage.lun", setter: fld_set}]}, + "lwrite": {convert: to_long, to:[{field: "rsa.db.lwrite", setter: fld_set}]}, + "macaddr": {convert: to_mac, to:[{field: "rsa.network.eth_host", setter: fld_set}]}, + "mail_id": {to:[{field: "rsa.misc.mail_id", setter: fld_set}]}, + "mask": {to:[{field: "rsa.network.mask", setter: fld_set}]}, + "match": {to:[{field: "rsa.misc.match", setter: fld_set}]}, + "mbug_data": {to:[{field: "rsa.misc.mbug_data", setter: fld_set}]}, + "mcb.req": {convert: to_long, to:[{field: "rsa.internal.mcb_req", setter: fld_set}]}, + "mcb.res": {convert: to_long, to:[{field: "rsa.internal.mcb_res", setter: fld_set}]}, + "mcbc.req": {convert: to_long, to:[{field: "rsa.internal.mcbc_req", setter: fld_set}]}, + "mcbc.res": {convert: to_long, to:[{field: "rsa.internal.mcbc_res", setter: fld_set}]}, + "medium": {convert: to_long, to:[{field: "rsa.internal.medium", setter: fld_set}]}, + "message": {to:[{field: "rsa.internal.message", setter: fld_set}]}, + "message_body": {to:[{field: "rsa.misc.message_body", setter: fld_set}]}, + "messageid": {to:[{field: "rsa.internal.messageid", setter: fld_set}]}, + "min": {to:[{field: "rsa.time.min", setter: fld_set}]}, + "misc": {to:[{field: "rsa.misc.misc", setter: fld_set}]}, + "misc_name": {to:[{field: "rsa.misc.misc_name", setter: fld_set}]}, + "mode": {to:[{field: "rsa.misc.mode", setter: fld_set}]}, + "month": {to:[{field: "rsa.time.month", setter: fld_set}]}, + "msg": {to:[{field: "rsa.internal.msg", setter: fld_set}]}, + "msgIdPart1": {to:[{field: "rsa.misc.msgIdPart1", setter: fld_set}]}, + "msgIdPart2": {to:[{field: "rsa.misc.msgIdPart2", setter: fld_set}]}, + "msgIdPart3": {to:[{field: "rsa.misc.msgIdPart3", setter: fld_set}]}, + "msgIdPart4": {to:[{field: "rsa.misc.msgIdPart4", setter: fld_set}]}, + "msg_id": {to:[{field: "rsa.internal.msg_id", setter: fld_set}]}, + "msg_type": {to:[{field: "rsa.misc.msg_type", setter: fld_set}]}, + "msgid": {to:[{field: "rsa.misc.msgid", setter: fld_set}]}, + "name": {to:[{field: "rsa.misc.name", setter: fld_set}]}, + "netname": {to:[{field: "rsa.network.netname", setter: fld_set}]}, + "netsessid": {to:[{field: "rsa.misc.netsessid", setter: fld_set}]}, + "network_port": {convert: to_long, to:[{field: "rsa.network.network_port", setter: fld_set}]}, + "network_service": {to:[{field: "rsa.network.network_service", setter: fld_set}]}, + "node": {to:[{field: "rsa.misc.node", setter: fld_set}]}, + "nodename": {to:[{field: "rsa.internal.node_name", setter: fld_set}]}, + "ntype": {to:[{field: "rsa.misc.ntype", setter: fld_set}]}, + "num": {to:[{field: "rsa.misc.num", setter: fld_set}]}, + "number": {to:[{field: "rsa.misc.number", setter: fld_set}]}, + "number1": {to:[{field: "rsa.misc.number1", setter: fld_set}]}, + "number2": {to:[{field: "rsa.misc.number2", setter: fld_set}]}, + "nwe.callback_id": {to:[{field: "rsa.internal.nwe_callback_id", setter: fld_set}]}, + "nwwn": 
{to:[{field: "rsa.misc.nwwn", setter: fld_set}]}, + "obj_id": {to:[{field: "rsa.internal.obj_id", setter: fld_set}]}, + "obj_name": {to:[{field: "rsa.misc.obj_name", setter: fld_set}]}, + "obj_server": {to:[{field: "rsa.internal.obj_server", setter: fld_set}]}, + "obj_type": {to:[{field: "rsa.misc.obj_type", setter: fld_set}]}, + "obj_value": {to:[{field: "rsa.internal.obj_val", setter: fld_set}]}, + "object": {to:[{field: "rsa.misc.object", setter: fld_set}]}, + "observed_val": {to:[{field: "rsa.misc.observed_val", setter: fld_set}]}, + "operation": {to:[{field: "rsa.misc.operation", setter: fld_set}]}, + "operation_id": {to:[{field: "rsa.misc.operation_id", setter: fld_set}]}, + "opkt": {to:[{field: "rsa.misc.opkt", setter: fld_set}]}, + "org.dst": {to:[{field: "rsa.physical.org_dst", setter: fld_prio, prio: 1}]}, + "org.src": {to:[{field: "rsa.physical.org_src", setter: fld_set}]}, + "org_dst": {to:[{field: "rsa.physical.org_dst", setter: fld_prio, prio: 0}]}, + "orig_from": {to:[{field: "rsa.misc.orig_from", setter: fld_set}]}, + "origin": {to:[{field: "rsa.network.origin", setter: fld_set}]}, + "original_owner": {to:[{field: "rsa.identity.owner", setter: fld_set}]}, + "os": {to:[{field: "rsa.misc.OS", setter: fld_set}]}, + "owner_id": {to:[{field: "rsa.misc.owner_id", setter: fld_set}]}, + "p_action": {to:[{field: "rsa.misc.p_action", setter: fld_set}]}, + "p_date": {to:[{field: "rsa.time.p_date", setter: fld_set}]}, + "p_filter": {to:[{field: "rsa.misc.p_filter", setter: fld_set}]}, + "p_group_object": {to:[{field: "rsa.misc.p_group_object", setter: fld_set}]}, + "p_id": {to:[{field: "rsa.misc.p_id", setter: fld_set}]}, + "p_month": {to:[{field: "rsa.time.p_month", setter: fld_set}]}, + "p_msgid": {to:[{field: "rsa.misc.p_msgid", setter: fld_set}]}, + "p_msgid1": {to:[{field: "rsa.misc.p_msgid1", setter: fld_set}]}, + "p_msgid2": {to:[{field: "rsa.misc.p_msgid2", setter: fld_set}]}, + "p_result1": {to:[{field: "rsa.misc.p_result1", setter: fld_set}]}, + "p_time": {to:[{field: "rsa.time.p_time", setter: fld_set}]}, + "p_time1": {to:[{field: "rsa.time.p_time1", setter: fld_set}]}, + "p_time2": {to:[{field: "rsa.time.p_time2", setter: fld_set}]}, + "p_url": {to:[{field: "rsa.web.p_url", setter: fld_set}]}, + "p_user_agent": {to:[{field: "rsa.web.p_user_agent", setter: fld_set}]}, + "p_web_cookie": {to:[{field: "rsa.web.p_web_cookie", setter: fld_set}]}, + "p_web_method": {to:[{field: "rsa.web.p_web_method", setter: fld_set}]}, + "p_web_referer": {to:[{field: "rsa.web.p_web_referer", setter: fld_set}]}, + "p_year": {to:[{field: "rsa.time.p_year", setter: fld_set}]}, + "packet_length": {to:[{field: "rsa.network.packet_length", setter: fld_set}]}, + "paddr": {convert: to_ip, to:[{field: "rsa.network.paddr", setter: fld_set}]}, + "param": {to:[{field: "rsa.misc.param", setter: fld_set}]}, + "param.dst": {to:[{field: "rsa.misc.param_dst", setter: fld_set}]}, + "param.src": {to:[{field: "rsa.misc.param_src", setter: fld_set}]}, + "parent_node": {to:[{field: "rsa.misc.parent_node", setter: fld_set}]}, + "parse.error": {to:[{field: "rsa.internal.parse_error", setter: fld_set}]}, + "password": {to:[{field: "rsa.identity.password", setter: fld_set}]}, + "password_chg": {to:[{field: "rsa.misc.password_chg", setter: fld_set}]}, + "password_expire": {to:[{field: "rsa.misc.password_expire", setter: fld_set}]}, + "patient_fname": {to:[{field: "rsa.healthcare.patient_fname", setter: fld_set}]}, + "patient_id": {to:[{field: "rsa.healthcare.patient_id", setter: fld_set}]}, + "patient_lname": {to:[{field: 
"rsa.healthcare.patient_lname", setter: fld_set}]}, + "patient_mname": {to:[{field: "rsa.healthcare.patient_mname", setter: fld_set}]}, + "payload.req": {convert: to_long, to:[{field: "rsa.internal.payload_req", setter: fld_set}]}, + "payload.res": {convert: to_long, to:[{field: "rsa.internal.payload_res", setter: fld_set}]}, + "peer": {to:[{field: "rsa.crypto.peer", setter: fld_set}]}, + "peer_id": {to:[{field: "rsa.crypto.peer_id", setter: fld_set}]}, + "permgranted": {to:[{field: "rsa.misc.permgranted", setter: fld_set}]}, + "permissions": {to:[{field: "rsa.db.permissions", setter: fld_set}]}, + "permwanted": {to:[{field: "rsa.misc.permwanted", setter: fld_set}]}, + "pgid": {to:[{field: "rsa.misc.pgid", setter: fld_set}]}, + "phone_number": {to:[{field: "rsa.misc.phone", setter: fld_prio, prio: 2}]}, + "phost": {to:[{field: "rsa.network.phost", setter: fld_set}]}, + "pid": {to:[{field: "rsa.misc.pid", setter: fld_set}]}, + "policy": {to:[{field: "rsa.misc.policy", setter: fld_set}]}, + "policyUUID": {to:[{field: "rsa.misc.policyUUID", setter: fld_set}]}, + "policy_id": {to:[{field: "rsa.misc.policy_id", setter: fld_set}]}, + "policy_value": {to:[{field: "rsa.misc.policy_value", setter: fld_set}]}, + "policy_waiver": {to:[{field: "rsa.misc.policy_waiver", setter: fld_set}]}, + "policyname": {to:[{field: "rsa.misc.policy_name", setter: fld_prio, prio: 0}]}, + "pool_id": {to:[{field: "rsa.misc.pool_id", setter: fld_set}]}, + "pool_name": {to:[{field: "rsa.misc.pool_name", setter: fld_set}]}, + "port": {convert: to_long, to:[{field: "rsa.network.port", setter: fld_set}]}, + "portname": {to:[{field: "rsa.misc.port_name", setter: fld_set}]}, + "pread": {convert: to_long, to:[{field: "rsa.db.pread", setter: fld_set}]}, + "priority": {to:[{field: "rsa.misc.priority", setter: fld_set}]}, + "privilege": {to:[{field: "rsa.file.privilege", setter: fld_set}]}, + "process.vid.dst": {to:[{field: "rsa.internal.process_vid_dst", setter: fld_set}]}, + "process.vid.src": {to:[{field: "rsa.internal.process_vid_src", setter: fld_set}]}, + "process_id_val": {to:[{field: "rsa.misc.process_id_val", setter: fld_set}]}, + "processing_time": {to:[{field: "rsa.time.process_time", setter: fld_set}]}, + "profile": {to:[{field: "rsa.identity.profile", setter: fld_set}]}, + "prog_asp_num": {to:[{field: "rsa.misc.prog_asp_num", setter: fld_set}]}, + "program": {to:[{field: "rsa.misc.program", setter: fld_set}]}, + "protocol_detail": {to:[{field: "rsa.network.protocol_detail", setter: fld_set}]}, + "pwwn": {to:[{field: "rsa.storage.pwwn", setter: fld_set}]}, + "r_hostid": {to:[{field: "rsa.network.alias_host", setter: fld_append}]}, + "real_data": {to:[{field: "rsa.misc.real_data", setter: fld_set}]}, + "realm": {to:[{field: "rsa.identity.realm", setter: fld_set}]}, + "reason": {to:[{field: "rsa.misc.reason", setter: fld_set}]}, + "rec_asp_device": {to:[{field: "rsa.misc.rec_asp_device", setter: fld_set}]}, + "rec_asp_num": {to:[{field: "rsa.misc.rec_asp_num", setter: fld_set}]}, + "rec_library": {to:[{field: "rsa.misc.rec_library", setter: fld_set}]}, + "recorded_time": {convert: to_date, to:[{field: "rsa.time.recorded_time", setter: fld_set}]}, + "recordnum": {to:[{field: "rsa.misc.recordnum", setter: fld_set}]}, + "registry.key": {to:[{field: "rsa.endpoint.registry_key", setter: fld_set}]}, + "registry.value": {to:[{field: "rsa.endpoint.registry_value", setter: fld_set}]}, + "remote_domain": {to:[{field: "rsa.web.remote_domain", setter: fld_set}]}, + "remote_domain_id": {to:[{field: "rsa.network.remote_domain_id", 
setter: fld_set}]}, + "reputation_num": {convert: to_double, to:[{field: "rsa.web.reputation_num", setter: fld_set}]}, + "resource": {to:[{field: "rsa.internal.resource", setter: fld_set}]}, + "resource_class": {to:[{field: "rsa.internal.resource_class", setter: fld_set}]}, + "result": {to:[{field: "rsa.misc.result", setter: fld_set}]}, + "result_code": {to:[{field: "rsa.misc.result_code", setter: fld_prio, prio: 1}]}, + "resultcode": {to:[{field: "rsa.misc.result_code", setter: fld_prio, prio: 0}]}, + "rid": {convert: to_long, to:[{field: "rsa.internal.rid", setter: fld_set}]}, + "risk": {to:[{field: "rsa.misc.risk", setter: fld_set}]}, + "risk_info": {to:[{field: "rsa.misc.risk_info", setter: fld_set}]}, + "risk_num": {convert: to_double, to:[{field: "rsa.misc.risk_num", setter: fld_set}]}, + "risk_num_comm": {convert: to_double, to:[{field: "rsa.misc.risk_num_comm", setter: fld_set}]}, + "risk_num_next": {convert: to_double, to:[{field: "rsa.misc.risk_num_next", setter: fld_set}]}, + "risk_num_sand": {convert: to_double, to:[{field: "rsa.misc.risk_num_sand", setter: fld_set}]}, + "risk_num_static": {convert: to_double, to:[{field: "rsa.misc.risk_num_static", setter: fld_set}]}, + "risk_suspicious": {to:[{field: "rsa.misc.risk_suspicious", setter: fld_set}]}, + "risk_warning": {to:[{field: "rsa.misc.risk_warning", setter: fld_set}]}, + "rpayload": {to:[{field: "rsa.network.rpayload", setter: fld_set}]}, + "ruid": {to:[{field: "rsa.misc.ruid", setter: fld_set}]}, + "rule": {to:[{field: "rsa.misc.rule", setter: fld_set}]}, + "rule_group": {to:[{field: "rsa.misc.rule_group", setter: fld_set}]}, + "rule_template": {to:[{field: "rsa.misc.rule_template", setter: fld_set}]}, + "rule_uid": {to:[{field: "rsa.misc.rule_uid", setter: fld_set}]}, + "rulename": {to:[{field: "rsa.misc.rule_name", setter: fld_set}]}, + "s_certauth": {to:[{field: "rsa.crypto.s_certauth", setter: fld_set}]}, + "s_cipher": {to:[{field: "rsa.crypto.cipher_src", setter: fld_set}]}, + "s_ciphersize": {convert: to_long, to:[{field: "rsa.crypto.cipher_size_src", setter: fld_set}]}, + "s_context": {to:[{field: "rsa.misc.context_subject", setter: fld_set}]}, + "s_sslver": {to:[{field: "rsa.crypto.ssl_ver_src", setter: fld_set}]}, + "sburb": {to:[{field: "rsa.misc.sburb", setter: fld_set}]}, + "scheme": {to:[{field: "rsa.crypto.scheme", setter: fld_set}]}, + "sdomain_fld": {to:[{field: "rsa.misc.sdomain_fld", setter: fld_set}]}, + "search.text": {to:[{field: "rsa.misc.search_text", setter: fld_set}]}, + "sec": {to:[{field: "rsa.misc.sec", setter: fld_set}]}, + "second": {to:[{field: "rsa.misc.second", setter: fld_set}]}, + "sensor": {to:[{field: "rsa.misc.sensor", setter: fld_set}]}, + "sensorname": {to:[{field: "rsa.misc.sensorname", setter: fld_set}]}, + "seqnum": {to:[{field: "rsa.misc.seqnum", setter: fld_set}]}, + "serial_number": {to:[{field: "rsa.misc.serial_number", setter: fld_set}]}, + "service.account": {to:[{field: "rsa.identity.service_account", setter: fld_set}]}, + "session": {to:[{field: "rsa.misc.session", setter: fld_set}]}, + "session.split": {to:[{field: "rsa.internal.session_split", setter: fld_set}]}, + "sessionid": {to:[{field: "rsa.misc.log_session_id", setter: fld_set}]}, + "sessionid1": {to:[{field: "rsa.misc.log_session_id1", setter: fld_set}]}, + "sessiontype": {to:[{field: "rsa.misc.sessiontype", setter: fld_set}]}, + "severity": {to:[{field: "rsa.misc.severity", setter: fld_set}]}, + "sid": {to:[{field: "rsa.identity.user_sid_dst", setter: fld_set}]}, + "sig.name": {to:[{field: "rsa.misc.sig_name", 
setter: fld_set}]}, + "sigUUID": {to:[{field: "rsa.misc.sigUUID", setter: fld_set}]}, + "sigcat": {to:[{field: "rsa.misc.sigcat", setter: fld_set}]}, + "sigid": {convert: to_long, to:[{field: "rsa.misc.sig_id", setter: fld_set}]}, + "sigid1": {convert: to_long, to:[{field: "rsa.misc.sig_id1", setter: fld_set}]}, + "sigid_string": {to:[{field: "rsa.misc.sig_id_str", setter: fld_set}]}, + "signame": {to:[{field: "rsa.misc.policy_name", setter: fld_prio, prio: 1}]}, + "sigtype": {to:[{field: "rsa.crypto.sig_type", setter: fld_set}]}, + "sinterface": {to:[{field: "rsa.network.sinterface", setter: fld_set}]}, + "site": {to:[{field: "rsa.internal.site", setter: fld_set}]}, + "size": {convert: to_long, to:[{field: "rsa.internal.size", setter: fld_set}]}, + "smask": {to:[{field: "rsa.network.smask", setter: fld_set}]}, + "snmp.oid": {to:[{field: "rsa.misc.snmp_oid", setter: fld_set}]}, + "snmp.value": {to:[{field: "rsa.misc.snmp_value", setter: fld_set}]}, + "sourcefile": {to:[{field: "rsa.internal.sourcefile", setter: fld_set}]}, + "space": {to:[{field: "rsa.misc.space", setter: fld_set}]}, + "space1": {to:[{field: "rsa.misc.space1", setter: fld_set}]}, + "spi": {to:[{field: "rsa.misc.spi", setter: fld_set}]}, + "sql": {to:[{field: "rsa.misc.sql", setter: fld_set}]}, + "src_dn": {to:[{field: "rsa.identity.dn_src", setter: fld_set}]}, + "src_payload": {to:[{field: "rsa.misc.payload_src", setter: fld_set}]}, + "src_spi": {to:[{field: "rsa.misc.spi_src", setter: fld_set}]}, + "src_zone": {to:[{field: "rsa.network.zone_src", setter: fld_set}]}, + "srcburb": {to:[{field: "rsa.misc.srcburb", setter: fld_set}]}, + "srcdom": {to:[{field: "rsa.misc.srcdom", setter: fld_set}]}, + "srcservice": {to:[{field: "rsa.misc.srcservice", setter: fld_set}]}, + "ssid": {to:[{field: "rsa.wireless.wlan_ssid", setter: fld_prio, prio: 0}]}, + "stamp": {convert: to_date, to:[{field: "rsa.time.stamp", setter: fld_set}]}, + "starttime": {convert: to_date, to:[{field: "rsa.time.starttime", setter: fld_set}]}, + "state": {to:[{field: "rsa.misc.state", setter: fld_set}]}, + "statement": {to:[{field: "rsa.internal.statement", setter: fld_set}]}, + "status": {to:[{field: "rsa.misc.status", setter: fld_set}]}, + "status1": {to:[{field: "rsa.misc.status1", setter: fld_set}]}, + "streams": {convert: to_long, to:[{field: "rsa.misc.streams", setter: fld_set}]}, + "subcategory": {to:[{field: "rsa.misc.subcategory", setter: fld_set}]}, + "subject": {to:[{field: "rsa.email.subject", setter: fld_set}]}, + "svcno": {to:[{field: "rsa.misc.svcno", setter: fld_set}]}, + "system": {to:[{field: "rsa.misc.system", setter: fld_set}]}, + "t_context": {to:[{field: "rsa.misc.context_target", setter: fld_set}]}, + "task_name": {to:[{field: "rsa.file.task_name", setter: fld_set}]}, + "tbdstr1": {to:[{field: "rsa.misc.tbdstr1", setter: fld_set}]}, + "tbdstr2": {to:[{field: "rsa.misc.tbdstr2", setter: fld_set}]}, + "tbl_name": {to:[{field: "rsa.db.table_name", setter: fld_set}]}, + "tcp_flags": {convert: to_long, to:[{field: "rsa.misc.tcp_flags", setter: fld_set}]}, + "terminal": {to:[{field: "rsa.misc.terminal", setter: fld_set}]}, + "tgtdom": {to:[{field: "rsa.misc.tgtdom", setter: fld_set}]}, + "tgtdomain": {to:[{field: "rsa.misc.tgtdomain", setter: fld_set}]}, + "threat_name": {to:[{field: "rsa.threat.threat_category", setter: fld_set}]}, + "threat_source": {to:[{field: "rsa.threat.threat_source", setter: fld_set}]}, + "threat_val": {to:[{field: "rsa.threat.threat_desc", setter: fld_set}]}, + "threshold": {to:[{field: "rsa.misc.threshold", setter: 
fld_set}]}, + "time": {convert: to_date, to:[{field: "rsa.internal.time", setter: fld_set}]}, + "timestamp": {to:[{field: "rsa.time.timestamp", setter: fld_set}]}, + "timezone": {to:[{field: "rsa.time.timezone", setter: fld_set}]}, + "to": {to:[{field: "rsa.email.email_dst", setter: fld_set}]}, + "tos": {convert: to_long, to:[{field: "rsa.misc.tos", setter: fld_set}]}, + "trans_from": {to:[{field: "rsa.email.trans_from", setter: fld_set}]}, + "trans_id": {to:[{field: "rsa.db.transact_id", setter: fld_set}]}, + "trans_to": {to:[{field: "rsa.email.trans_to", setter: fld_set}]}, + "trigger_desc": {to:[{field: "rsa.misc.trigger_desc", setter: fld_set}]}, + "trigger_val": {to:[{field: "rsa.misc.trigger_val", setter: fld_set}]}, + "type": {to:[{field: "rsa.misc.type", setter: fld_set}]}, + "type1": {to:[{field: "rsa.misc.type1", setter: fld_set}]}, + "tzone": {to:[{field: "rsa.time.tzone", setter: fld_set}]}, + "ubc.req": {convert: to_long, to:[{field: "rsa.internal.ubc_req", setter: fld_set}]}, + "ubc.res": {convert: to_long, to:[{field: "rsa.internal.ubc_res", setter: fld_set}]}, + "udb_class": {to:[{field: "rsa.misc.udb_class", setter: fld_set}]}, + "url_fld": {to:[{field: "rsa.misc.url_fld", setter: fld_set}]}, + "urlpage": {to:[{field: "rsa.web.urlpage", setter: fld_set}]}, + "urlroot": {to:[{field: "rsa.web.urlroot", setter: fld_set}]}, + "user_address": {to:[{field: "rsa.email.email", setter: fld_append}]}, + "user_dept": {to:[{field: "rsa.identity.user_dept", setter: fld_set}]}, + "user_div": {to:[{field: "rsa.misc.user_div", setter: fld_set}]}, + "user_fname": {to:[{field: "rsa.identity.firstname", setter: fld_set}]}, + "user_lname": {to:[{field: "rsa.identity.lastname", setter: fld_set}]}, + "user_mname": {to:[{field: "rsa.identity.middlename", setter: fld_set}]}, + "user_org": {to:[{field: "rsa.identity.org", setter: fld_set}]}, + "user_role": {to:[{field: "rsa.identity.user_role", setter: fld_set}]}, + "userid": {to:[{field: "rsa.misc.userid", setter: fld_set}]}, + "username_fld": {to:[{field: "rsa.misc.username_fld", setter: fld_set}]}, + "utcstamp": {to:[{field: "rsa.misc.utcstamp", setter: fld_set}]}, + "v_instafname": {to:[{field: "rsa.misc.v_instafname", setter: fld_set}]}, + "vendor_event_cat": {to:[{field: "rsa.investigations.event_vcat", setter: fld_set}]}, + "version": {to:[{field: "rsa.misc.version", setter: fld_set}]}, + "vid": {to:[{field: "rsa.internal.msg_vid", setter: fld_set}]}, + "virt_data": {to:[{field: "rsa.misc.virt_data", setter: fld_set}]}, + "virusname": {to:[{field: "rsa.misc.virusname", setter: fld_set}]}, + "vlan": {convert: to_long, to:[{field: "rsa.network.vlan", setter: fld_set}]}, + "vlan.name": {to:[{field: "rsa.network.vlan_name", setter: fld_set}]}, + "vm_target": {to:[{field: "rsa.misc.vm_target", setter: fld_set}]}, + "vpnid": {to:[{field: "rsa.misc.vpnid", setter: fld_set}]}, + "vsys": {to:[{field: "rsa.misc.vsys", setter: fld_set}]}, + "vuln_ref": {to:[{field: "rsa.misc.vuln_ref", setter: fld_set}]}, + "web_cookie": {to:[{field: "rsa.web.web_cookie", setter: fld_set}]}, + "web_extension_tmp": {to:[{field: "rsa.web.web_extension_tmp", setter: fld_set}]}, + "web_host": {to:[{field: "rsa.web.alias_host", setter: fld_set}]}, + "web_method": {to:[{field: "rsa.misc.action", setter: fld_append}]}, + "web_page": {to:[{field: "rsa.web.web_page", setter: fld_set}]}, + "web_ref_domain": {to:[{field: "rsa.web.web_ref_domain", setter: fld_set}]}, + "web_ref_host": {to:[{field: "rsa.network.alias_host", setter: fld_append}]}, + "web_ref_page": {to:[{field: 
"rsa.web.web_ref_page", setter: fld_set}]}, + "web_ref_query": {to:[{field: "rsa.web.web_ref_query", setter: fld_set}]}, + "web_ref_root": {to:[{field: "rsa.web.web_ref_root", setter: fld_set}]}, + "wifi_channel": {convert: to_long, to:[{field: "rsa.wireless.wlan_channel", setter: fld_set}]}, + "wlan": {to:[{field: "rsa.wireless.wlan_name", setter: fld_set}]}, + "word": {to:[{field: "rsa.internal.word", setter: fld_set}]}, + "workspace_desc": {to:[{field: "rsa.misc.workspace", setter: fld_set}]}, + "workstation": {to:[{field: "rsa.network.alias_host", setter: fld_append}]}, + "year": {to:[{field: "rsa.time.year", setter: fld_set}]}, + "zone": {to:[{field: "rsa.network.zone", setter: fld_set}]}, + }; + + function to_date(value) { + switch (typeof (value)) { + case "object": + // This is a Date. But as it was obtained from evt.Get(), the VM + // doesn't see it as a JS Date anymore, thus value instanceof Date === false. + // Have to trust that any object here is a valid Date for Go. + return value; + case "string": + var asDate = new Date(value); + if (!isNaN(asDate)) return asDate; + } + } + + // ECMAScript 5.1 doesn't have Object.MAX_SAFE_INTEGER / Object.MIN_SAFE_INTEGER. + var maxSafeInt = Math.pow(2, 53) - 1; + var minSafeInt = -maxSafeInt; + + function to_long(value) { + var num = parseInt(value); + // Better not to index a number if it's not safe (above 53 bits). + return !isNaN(num) && minSafeInt <= num && num <= maxSafeInt ? num : undefined; + } + + function to_ip(value) { + if (value.indexOf(":") === -1) + return to_ipv4(value); + return to_ipv6(value); + } + + var ipv4_regex = /^(\d+)\.(\d+)\.(\d+)\.(\d+)$/; + var ipv6_hex_regex = /^[0-9A-Fa-f]{1,4}$/; + + function to_ipv4(value) { + var result = ipv4_regex.exec(value); + if (result == null || result.length !== 5) return; + for (var i = 1; i < 5; i++) { + var num = strictToInt(result[i]); + if (isNaN(num) || num < 0 || num > 255) return; + } + return value; + } + + function to_ipv6(value) { + var sqEnd = value.indexOf("]"); + if (sqEnd > -1) { + if (value.charAt(0) !== "[") return; + value = value.substr(1, sqEnd - 1); + } + var zoneOffset = value.indexOf("%"); + if (zoneOffset > -1) { + value = value.substr(0, zoneOffset); + } + var parts = value.split(":"); + if (parts == null || parts.length < 3 || parts.length > 8) return; + var numEmpty = 0; + var innerEmpty = 0; + for (var i = 0; i < parts.length; i++) { + if (parts[i].length === 0) { + numEmpty++; + if (i > 0 && i + 1 < parts.length) innerEmpty++; + } else if (!parts[i].match(ipv6_hex_regex) && + // Accept an IPv6 with a valid IPv4 at the end. + ((i + 1 < parts.length) || !to_ipv4(parts[i]))) { + return; + } + } + return innerEmpty === 0 && parts.length === 8 || innerEmpty === 1 ? value : undefined; + } + + function to_double(value) { + return parseFloat(value); + } + + function to_mac(value) { + // ES doesn't have a mac datatype so it's safe to ingest whatever was captured. + return value; + } + + function to_lowercase(value) { + // to_lowercase is used against keyword fields, which can accept + // any other type (numbers, dates). + return typeof(value) === "string"? 
+function fld_set(dst, value) {
+    dst[this.field] = { v: value };
+}
+
+function fld_append(dst, value) {
+    if (dst[this.field] === undefined) {
+        dst[this.field] = { v: [value] };
+    } else {
+        var base = dst[this.field];
+        if (base.v.indexOf(value)===-1) base.v.push(value);
+    }
+}
+
+function fld_prio(dst, value) {
+    if (dst[this.field] === undefined) {
+        dst[this.field] = { v: value, prio: this.prio};
+    } else if(this.prio < dst[this.field].prio) {
+        dst[this.field].v = value;
+        dst[this.field].prio = this.prio;
+    }
+}
+
+var valid_ecs_outcome = {
+    'failure': true,
+    'success': true,
+    'unknown': true
+};
+
+function fld_ecs_outcome(dst, value) {
+    value = value.toLowerCase();
+    if (valid_ecs_outcome[value] === undefined) {
+        value = 'unknown';
+    }
+    if (dst[this.field] === undefined) {
+        dst[this.field] = { v: value };
+    } else if (dst[this.field].v === 'unknown') {
+        dst[this.field] = { v: value };
+    }
+}
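The setters above decide what happens when several raw keys map onto the same output field: `fld_set` is last-write-wins, `fld_append` builds a de-duplicated array, and `fld_prio` lets a preferred source key win regardless of the order the keys appear in the event. A small harness showing the `fld_prio` resolution for `rsa.misc.result_code`, whose table entries above give `resultcode` prio 0 and `result_code` prio 1 (the sample values are made up):

```js
// Each mapping entry is bound as `this`, supplying .field and .prio.
function fld_prio(dst, value) {
    if (dst[this.field] === undefined) {
        dst[this.field] = { v: value, prio: this.prio };
    } else if (this.prio < dst[this.field].prio) {
        dst[this.field].v = value;
        dst[this.field].prio = this.prio;
    }
}

var dst = {};
// The less-preferred key (prio 1) arrives first...
fld_prio.call({ field: "rsa.misc.result_code", prio: 1 }, dst, "0x80090308");
// ...but the prio-0 key overrides it even though it arrives later.
fld_prio.call({ field: "rsa.misc.result_code", prio: 0 }, dst, "401");
console.log(dst["rsa.misc.result_code"].v); // "401"
```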
msg("QUALYS", dup7); + + var msg24 = msg("SEARCH", dup7); + + var msg25 = msg("TRACK", dup7); + + var msg26 = msg("TRACE", dup7); + + var msg27 = msg("uGET", dup7); + + var msg28 = msg("null", dup7); + + var msg29 = msg("rndmmtd", dup7); + + var msg30 = msg("RNDMMTD", dup7); + + var msg31 = msg("asdf", dup7); + + var msg32 = msg("DEBUG", dup7); + + var msg33 = msg("COOK", dup7); + + var msg34 = msg("nGET", dup7); + + var chain1 = processor_chain([ + select1, + msgid_select({ + "ABCD": msg1, + "BADMETHOD": msg2, + "BADMTHD": msg3, + "BDMTHD": msg4, + "CFYZ": msg6, + "CONNECT": msg7, + "COOK": msg33, + "DEBUG": msg32, + "DELETE": msg8, + "DETECT_METHOD_TYPE": msg9, + "FGET": msg10, + "GET": msg11, + "HEAD": msg13, + "INDEX": msg5, + "LOCK": msg15, + "MKCOL": msg16, + "NCIRCLE": msg17, + "OPTIONS": msg18, + "POST": msg19, + "PRONECT": msg20, + "PROPFIND": msg21, + "PUT": msg22, + "QUALYS": msg23, + "RNDMMTD": msg30, + "SEARCH": msg24, + "TRACE": msg26, + "TRACK": msg25, + "asdf": msg31, + "get": msg12, + "id": msg14, + "nGET": msg34, + "null": msg28, + "rndmmtd": msg29, + "uGET": msg27, + }), + ]); + + var part1 = match("MESSAGE#0:ABCD", "nwparser.payload", "%{saddr}||%{fld5}||%{username}||[%{fld7->} %{timezone}]||%{web_method}||%{web_host}||%{webpage}||%{web_query}||%{network_service}||%{resultcode}||%{sbytes}||%{web_referer}||%{user_agent}||%{web_cookie}", processor_chain([ + dup1, + dup2, + dup3, + dup4, + dup5, + dup6, + ])); + - community_id: null + - registered_domain: + field: dns.question.name + ignore_failure: true + ignore_missing: true + target_etld_field: dns.question.top_level_domain + target_field: dns.question.registered_domain + target_subdomain_field: dns.question.subdomain + - registered_domain: + field: client.domain + ignore_failure: true + ignore_missing: true + target_etld_field: client.top_level_domain + target_field: client.registered_domain + target_subdomain_field: client.subdomain + - registered_domain: + field: server.domain + ignore_failure: true + ignore_missing: true + target_etld_field: server.top_level_domain + target_field: server.registered_domain + target_subdomain_field: server.subdomain + - registered_domain: + field: destination.domain + ignore_failure: true + ignore_missing: true + target_etld_field: destination.top_level_domain + target_field: destination.registered_domain + target_subdomain_field: destination.subdomain + - registered_domain: + field: source.domain + ignore_failure: true + ignore_missing: true + target_etld_field: source.top_level_domain + target_field: source.registered_domain + target_subdomain_field: source.subdomain + - registered_domain: + field: url.domain + ignore_failure: true + ignore_missing: true + target_etld_field: url.top_level_domain + target_field: url.registered_domain + target_subdomain_field: url.subdomain + - add_locale: null + prospector: + scanner: + symlinks: true + tags: + - tomcat-log + - forwarded + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/traefik.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/traefik.yml new file mode 100644 index 00000000000..4ab26982389 --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/traefik.yml @@ -0,0 +1,37 @@ +inputs: + - name: filestream-traefik + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.traefik.access.enabled} == true or ${kubernetes.hints.traefik.enabled} == true + data_stream: + dataset: traefik.access + type: logs + exclude_files: + - .gz$ + 
parsers: + - container: + format: auto + stream: ${kubernetes.hints.traefik.access.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + data_stream.namespace: default + - name: traefik/metrics-traefik + type: traefik/metrics + use_output: default + streams: + - condition: ${kubernetes.hints.traefik.health.enabled} == true or ${kubernetes.hints.traefik.enabled} == true + data_stream: + dataset: traefik.health + type: metrics + hosts: + - ${kubernetes.hints.traefik.health.host|'localhost:8080'} + metricsets: + - health + period: ${kubernetes.hints.traefik.health.period|'10s'} + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/udp.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/udp.yml new file mode 100644 index 00000000000..60fa5ebf598 --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/udp.yml @@ -0,0 +1,33 @@ +inputs: + - name: udp-udp + type: udp + use_output: default + streams: + - condition: ${kubernetes.hints.udp.generic.enabled} == true or ${kubernetes.hints.udp.enabled} == true + data_stream: + dataset: udp.generic + type: logs + host: localhost:8080 + max_message_size: 10KiB + data_stream.namespace: default + - name: filestream-udp + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.udp.container_logs.enabled} == true + data_stream: + dataset: kubernetes.container_logs + type: logs + exclude_files: [] + exclude_lines: [] + parsers: + - container: + format: auto + stream: all + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + prospector: + scanner: + symlinks: true + tags: [] + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/zeek.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/zeek.yml new file mode 100644 index 00000000000..22bcc875894 --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/zeek.yml @@ -0,0 +1,2271 @@ +inputs: + - name: filestream-zeek + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.zeek.capture_loss.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.capture_loss + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.capture_loss.stream|'all'} + paths: + - /var/log/bro/current/capture_loss.log + - /opt/zeek/logs/current/capture_loss.log + - /usr/local/var/spool/zeek/capture_loss.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - zeek-capture-loss + - condition: ${kubernetes.hints.zeek.connection.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.connection + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.connection.stream|'all'} + paths: + - /var/log/bro/current/conn.log + - /opt/zeek/logs/current/conn.log + - /usr/local/var/spool/zeek/conn.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - zeek-connection + - condition: ${kubernetes.hints.zeek.dce_rpc.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.dce_rpc + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.dce_rpc.stream|'all'} + paths: + - /var/log/bro/current/dce_rpc.log + - /opt/zeek/logs/current/dce_rpc.log + 
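+            # The path list covers the legacy Bro layout, the common Zeek
+            # install prefix, and the spool directory, so the logs are found
+            # whichever layout the node uses.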
- /usr/local/var/spool/zeek/dce_rpc.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - zeek-dce-rpc + - condition: ${kubernetes.hints.zeek.dhcp.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.dhcp + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.dhcp.stream|'all'} + paths: + - /var/log/bro/current/dhcp.log + - /opt/zeek/logs/current/dhcp.log + - /usr/local/var/spool/zeek/dhcp.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - zeek-dhcp + - condition: ${kubernetes.hints.zeek.dnp3.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.dnp3 + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.dnp3.stream|'all'} + paths: + - /var/log/bro/current/dnp3.log + - /opt/zeek/logs/current/dnp3.log + - /usr/local/var/spool/zeek/dnp3.log + prospector: + scanner: + symlinks: true + tags: + - zeek-dnp3 + - condition: ${kubernetes.hints.zeek.dns.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.dns + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.dns.stream|'all'} + paths: + - /var/log/bro/current/dns.log + - /opt/zeek/logs/current/dns.log + - /usr/local/var/spool/zeek/dns.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - zeek-dns + - condition: ${kubernetes.hints.zeek.dpd.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.dpd + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.dpd.stream|'all'} + paths: + - /var/log/bro/current/dpd.log + - /opt/zeek/logs/current/dpd.log + - /usr/local/var/spool/zeek/dpd.log + prospector: + scanner: + symlinks: true + tags: + - zeek-dpd + - condition: ${kubernetes.hints.zeek.files.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.files + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.files.stream|'all'} + paths: + - /var/log/bro/current/files.log + - /opt/zeek/logs/current/files.log + - /usr/local/var/spool/zeek/files.log + prospector: + scanner: + symlinks: true + tags: + - zeek-files + - condition: ${kubernetes.hints.zeek.ftp.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.ftp + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.ftp.stream|'all'} + paths: + - /var/log/bro/current/ftp.log + - /opt/zeek/logs/current/ftp.log + - /usr/local/var/spool/zeek/ftp.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - zeek-ftp + - condition: ${kubernetes.hints.zeek.http.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.http + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.http.stream|'all'} + paths: + - /var/log/bro/current/http.log + - /opt/zeek/logs/current/http.log + - /usr/local/var/spool/zeek/http.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - zeek-http + - condition: ${kubernetes.hints.zeek.intel.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.intel + type: logs + exclude_files: + - .gz$ + parsers: + - 
container: + format: auto + stream: ${kubernetes.hints.zeek.intel.stream|'all'} + paths: + - /var/log/bro/current/intel.log + - /opt/zeek/logs/current/intel.log + - /usr/local/var/spool/zeek/intel.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - zeek-intel + - condition: ${kubernetes.hints.zeek.irc.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.irc + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.irc.stream|'all'} + paths: + - /var/log/bro/current/irc.log + - /opt/zeek/logs/current/irc.log + - /usr/local/var/spool/zeek/irc.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - zeek-irc + - condition: ${kubernetes.hints.zeek.kerberos.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.kerberos + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.kerberos.stream|'all'} + paths: + - /var/log/bro/current/kerberos.log + - /opt/zeek/logs/current/kerberos.log + - /usr/local/var/spool/zeek/kerberos.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - zeek-kerberos + - condition: ${kubernetes.hints.zeek.known_certs.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.known_certs + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.known_certs.stream|'all'} + paths: + - /var/log/bro/current/known_certs.log + - /opt/zeek/logs/current/known_certs.log + - /usr/local/var/spool/zeek/known_certs.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - zeek-known_certs + - condition: ${kubernetes.hints.zeek.known_hosts.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.known_hosts + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.known_hosts.stream|'all'} + paths: + - /var/log/bro/current/known_hosts.log + - /opt/zeek/logs/current/known_hosts.log + - /usr/local/var/spool/zeek/known_hosts.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - zeek-known_hosts + - condition: ${kubernetes.hints.zeek.known_services.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.known_services + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.known_services.stream|'all'} + paths: + - /var/log/bro/current/known_services.log + - /opt/zeek/logs/current/known_services.log + - /usr/local/var/spool/zeek/known_services.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - zeek-known_services + - condition: ${kubernetes.hints.zeek.modbus.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.modbus + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.modbus.stream|'all'} + paths: + - /var/log/bro/current/modbus.log + - /opt/zeek/logs/current/modbus.log + - /usr/local/var/spool/zeek/modbus.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - zeek-modbus + - condition: ${kubernetes.hints.zeek.mysql.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.mysql + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: 
${kubernetes.hints.zeek.mysql.stream|'all'} + paths: + - /var/log/bro/current/mysql.log + - /opt/zeek/logs/current/mysql.log + - /usr/local/var/spool/zeek/mysql.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - zeek-mysql + - condition: ${kubernetes.hints.zeek.notice.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.notice + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.notice.stream|'all'} + paths: + - /var/log/bro/current/notice.log + - /opt/zeek/logs/current/notice.log + - /usr/local/var/spool/zeek/notice.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - zeek-notice + - condition: ${kubernetes.hints.zeek.ntlm.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.ntlm + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.ntlm.stream|'all'} + paths: + - /var/log/bro/current/ntlm.log + - /opt/zeek/logs/current/ntlm.log + - /usr/local/var/spool/zeek/ntlm.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - zeek-ntlm + - condition: ${kubernetes.hints.zeek.ntp.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.ntp + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.ntp.stream|'all'} + paths: + - /var/log/bro/current/ntp.log + - /opt/zeek/logs/current/ntp.log + - /usr/local/var/spool/zeek/ntp.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - zeek-ntp + - condition: ${kubernetes.hints.zeek.ocsp.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.ocsp + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.ocsp.stream|'all'} + paths: + - /var/log/bro/current/ocsp.log + - /opt/zeek/logs/current/ocsp.log + - /usr/local/var/spool/zeek/ocsp.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - zeek-ocsp + - condition: ${kubernetes.hints.zeek.pe.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.pe + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.pe.stream|'all'} + paths: + - /var/log/bro/current/pe.log + - /opt/zeek/logs/current/pe.log + - /usr/local/var/spool/zeek/pe.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - zeek-pe + - condition: ${kubernetes.hints.zeek.radius.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.radius + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.radius.stream|'all'} + paths: + - /var/log/bro/current/radius.log + - /opt/zeek/logs/current/radius.log + - /usr/local/var/spool/zeek/radius.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - zeek-radius + - condition: ${kubernetes.hints.zeek.rdp.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.rdp + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.rdp.stream|'all'} + paths: + - /var/log/bro/current/rdp.log + - /opt/zeek/logs/current/rdp.log + - /usr/local/var/spool/zeek/rdp.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - zeek-rdp + - condition: 
${kubernetes.hints.zeek.rfb.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.rfb + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.rfb.stream|'all'} + paths: + - /var/log/bro/current/rfb.log + - /opt/zeek/logs/current/rfb.log + - /usr/local/var/spool/zeek/rfb.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - zeek-rfb + - condition: ${kubernetes.hints.zeek.signature.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.signature + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.signature.stream|'all'} + paths: + - /var/log/bro/current/signature.log + - /opt/zeek/logs/current/signature.log + - /usr/local/var/spool/zeek/signature.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - zeek-signature + - condition: ${kubernetes.hints.zeek.sip.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.sip + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.sip.stream|'all'} + paths: + - /var/log/bro/current/sip.log + - /opt/zeek/logs/current/sip.log + - /usr/local/var/spool/zeek/sip.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - zeek-sip + - condition: ${kubernetes.hints.zeek.smb_cmd.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.smb_cmd + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.smb_cmd.stream|'all'} + paths: + - /var/log/bro/current/smb_cmd.log + - /opt/zeek/logs/current/smb_cmd.log + - /usr/local/var/spool/zeek/smb_cmd.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - zeek-smb-cmd + - condition: ${kubernetes.hints.zeek.smb_files.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.smb_files + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.smb_files.stream|'all'} + paths: + - /var/log/bro/current/smb_files.log + - /opt/zeek/logs/current/smb_files.log + - /usr/local/var/spool/zeek/smb_files.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - zeek-smb-files + - condition: ${kubernetes.hints.zeek.smb_mapping.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.smb_mapping + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.smb_mapping.stream|'all'} + paths: + - /var/log/bro/current/smb_mapping.log + - /opt/zeek/logs/current/smb_mapping.log + - /usr/local/var/spool/zeek/smb_mapping.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - zeek.smb_mapping + - condition: ${kubernetes.hints.zeek.smtp.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.smtp + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.smtp.stream|'all'} + paths: + - /var/log/bro/current/smtp.log + - /opt/zeek/logs/current/smtp.log + - /usr/local/var/spool/zeek/smtp.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - zeek-smtp + - condition: ${kubernetes.hints.zeek.snmp.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.snmp + type: logs 
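+        # Each stream is gated on its dataset-level hint or on the single
+        # package-level ${kubernetes.hints.zeek.enabled} switch, which turns
+        # on every zeek dataset at once.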
+ exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.snmp.stream|'all'} + paths: + - /var/log/bro/current/snmp.log + - /opt/zeek/logs/current/snmp.log + - /usr/local/var/spool/zeek/snmp.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - zeek-snmp + - condition: ${kubernetes.hints.zeek.socks.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.socks + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.socks.stream|'all'} + paths: + - /var/log/bro/current/socks.log + - /opt/zeek/logs/current/socks.log + - /usr/local/var/spool/zeek/socks.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - zeek-socks + - condition: ${kubernetes.hints.zeek.software.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.software + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.software.stream|'all'} + paths: + - /var/log/bro/current/software.log + - /opt/zeek/logs/current/software.log + - /usr/local/var/spool/zeek/software.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - zeek-software + - condition: ${kubernetes.hints.zeek.ssh.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.ssh + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.ssh.stream|'all'} + paths: + - /var/log/bro/current/ssh.log + - /opt/zeek/logs/current/ssh.log + - /usr/local/var/spool/zeek/ssh.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - zeek-ssh + - condition: ${kubernetes.hints.zeek.ssl.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.ssl + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.ssl.stream|'all'} + paths: + - /var/log/bro/current/ssl.log + - /opt/zeek/logs/current/ssl.log + - /usr/local/var/spool/zeek/ssl.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - zeek-ssl + - condition: ${kubernetes.hints.zeek.stats.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.stats + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.stats.stream|'all'} + paths: + - /var/log/bro/current/stats.log + - /opt/zeek/logs/current/stats.log + - /usr/local/var/spool/zeek/stats.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - zeek-stats + - condition: ${kubernetes.hints.zeek.syslog.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.syslog + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.syslog.stream|'all'} + paths: + - /var/log/bro/current/syslog.log + - /opt/zeek/logs/current/syslog.log + - /usr/local/var/spool/zeek/syslog.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - zeek-syslog + - condition: ${kubernetes.hints.zeek.traceroute.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.traceroute + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.traceroute.stream|'all'} + paths: + - /var/log/bro/current/traceroute.log + - 
/opt/zeek/logs/current/traceroute.log + - /usr/local/var/spool/zeek/traceroute.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - zeek-traceroute + - condition: ${kubernetes.hints.zeek.tunnel.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.tunnel + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.tunnel.stream|'all'} + paths: + - /var/log/bro/current/tunnel.log + - /opt/zeek/logs/current/tunnel.log + - /usr/local/var/spool/zeek/tunnel.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - zeek-tunnel + - condition: ${kubernetes.hints.zeek.weird.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.weird + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.weird.stream|'all'} + paths: + - /var/log/bro/current/weird.log + - /opt/zeek/logs/current/weird.log + - /usr/local/var/spool/zeek/weird.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - zeek-weird + - condition: ${kubernetes.hints.zeek.x509.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.x509 + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.x509.stream|'all'} + paths: + - /var/log/bro/current/x509.log + - /opt/zeek/logs/current/x509.log + - /usr/local/var/spool/zeek/x509.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - zeek-x509 + data_stream.namespace: default + - name: httpjson-zeek + type: httpjson + use_output: default + streams: + - condition: ${kubernetes.hints.zeek.capture_loss.enabled} == true and ${kubernetes.hints.zeek.enabled} == true + config_version: 2 + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: zeek.capture_loss + type: logs + interval: 10s + request.method: POST + request.transforms: + - set: + target: url.params.search + value: search sourcetype="capture_loss-*" | streamstats max(_indextime) AS max_indextime + - set: + target: url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: https://server.example.com:8089/services/search/jobs/export + response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - zeek-capture-loss + - condition: ${kubernetes.hints.zeek.connection.enabled} == true and ${kubernetes.hints.zeek.enabled} == true + config_version: 2 + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: zeek.connection + type: logs + interval: 10s + request.method: POST + request.transforms: + - set: + target: url.params.search + value: search sourcetype="conn-*" | streamstats max(_indextime) AS max_indextime + - set: + target: url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: 
https://server.example.com:8089/services/search/jobs/export + response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - zeek-connection + - condition: ${kubernetes.hints.zeek.dce_rpc.enabled} == true and ${kubernetes.hints.zeek.enabled} == true + config_version: 2 + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: zeek.dce_rpc + type: logs + interval: 10s + request.method: POST + request.transforms: + - set: + target: url.params.search + value: search sourcetype="dce_rpc-*" | streamstats max(_indextime) AS max_indextime + - set: + target: url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: https://server.example.com:8089/services/search/jobs/export + response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - zeek-dce-rpc + - condition: ${kubernetes.hints.zeek.dhcp.enabled} == true and ${kubernetes.hints.zeek.enabled} == true + config_version: 2 + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: zeek.dhcp + type: logs + interval: 10s + request.method: POST + request.transforms: + - set: + target: url.params.search + value: search sourcetype="dhcp-*" | streamstats max(_indextime) AS max_indextime + - set: + target: url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: https://server.example.com:8089/services/search/jobs/export + response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - zeek-dhcp + - condition: ${kubernetes.hints.zeek.dnp3.enabled} == true and ${kubernetes.hints.zeek.enabled} == true + config_version: 2 + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: zeek.dnp3 + type: logs + interval: 10s + request.method: POST + request.transforms: + - set: + target: url.params.search + value: search sourcetype="dnp3-*" | streamstats max(_indextime) AS max_indextime + - set: + target: url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: https://server.example.com:8089/services/search/jobs/export + response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - zeek-dnp3 + - condition: ${kubernetes.hints.zeek.dns.enabled} == true and ${kubernetes.hints.zeek.enabled} == true + config_version: 2 + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: zeek.dns + type: logs + interval: 10s + request.method: POST + 
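+        # The cursor keeps the max _indextime seen in the previous Splunk
+        # export, and the transforms below feed it back as index_earliest
+        # (defaulting to now-10s on the first request), so each poll resumes
+        # exactly where the previous one stopped.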
request.transforms: + - set: + target: url.params.search + value: search sourcetype="dns-*" | streamstats max(_indextime) AS max_indextime + - set: + target: url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: https://server.example.com:8089/services/search/jobs/export + response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - zeek-dns + - condition: ${kubernetes.hints.zeek.dpd.enabled} == true and ${kubernetes.hints.zeek.enabled} == true + config_version: 2 + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: zeek.dpd + type: logs + interval: 10s + request.method: POST + request.transforms: + - set: + target: url.params.search + value: search sourcetype="dpd-*" | streamstats max(_indextime) AS max_indextime + - set: + target: url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: https://server.example.com:8089/services/search/jobs/export + response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - zeek-dpd + - condition: ${kubernetes.hints.zeek.files.enabled} == true and ${kubernetes.hints.zeek.enabled} == true + config_version: 2 + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: zeek.files + type: logs + interval: 10s + request.method: POST + request.transforms: + - set: + target: url.params.search + value: search sourcetype="files-*" | streamstats max(_indextime) AS max_indextime + - set: + target: url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: https://server.example.com:8089/services/search/jobs/export + response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - zeek-files + - condition: ${kubernetes.hints.zeek.ftp.enabled} == true and ${kubernetes.hints.zeek.enabled} == true + config_version: 2 + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: zeek.ftp + type: logs + interval: 10s + request.method: POST + request.transforms: + - set: + target: url.params.search + value: search sourcetype="ftp-*" | streamstats max(_indextime) AS max_indextime + - set: + target: url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: 
https://server.example.com:8089/services/search/jobs/export + response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - zeek-ftp + - condition: ${kubernetes.hints.zeek.http.enabled} == true and ${kubernetes.hints.zeek.enabled} == true + config_version: 2 + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: zeek.http + type: logs + interval: 10s + request.method: POST + request.transforms: + - set: + target: url.params.search + value: search sourcetype="http-*" | streamstats max(_indextime) AS max_indextime + - set: + target: url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: https://server.example.com:8089/services/search/jobs/export + response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - zeek-http + - condition: ${kubernetes.hints.zeek.intel.enabled} == true and ${kubernetes.hints.zeek.enabled} == true + config_version: 2 + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: zeek.intel + type: logs + interval: 10s + request.method: POST + request.transforms: + - set: + target: url.params.search + value: search sourcetype="intel-*" | streamstats max(_indextime) AS max_indextime + - set: + target: url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: https://server.example.com:8089/services/search/jobs/export + response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - zeek-intel + - condition: ${kubernetes.hints.zeek.irc.enabled} == true and ${kubernetes.hints.zeek.enabled} == true + config_version: 2 + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: zeek.irc + type: logs + interval: 10s + request.method: POST + request.transforms: + - set: + target: url.params.search + value: search sourcetype="irc-*" | streamstats max(_indextime) AS max_indextime + - set: + target: url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: https://server.example.com:8089/services/search/jobs/export + response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - zeek-irc + - condition: ${kubernetes.hints.zeek.kerberos.enabled} == true and ${kubernetes.hints.zeek.enabled} == true + config_version: 2 + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: zeek.kerberos + type: logs + interval: 10s + request.method: POST + request.transforms: + 
- set: + target: url.params.search + value: search sourcetype="kerberos-*" | streamstats max(_indextime) AS max_indextime + - set: + target: url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: https://server.example.com:8089/services/search/jobs/export + response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - zeek-kerberos + - condition: ${kubernetes.hints.zeek.modbus.enabled} == true and ${kubernetes.hints.zeek.enabled} == true + config_version: 2 + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: zeek.modbus + type: logs + interval: 10s + request.method: POST + request.transforms: + - set: + target: url.params.search + value: search sourcetype="modbus-*" | streamstats max(_indextime) AS max_indextime + - set: + target: url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: https://server.example.com:8089/services/search/jobs/export + response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - zeek-modbus + - condition: ${kubernetes.hints.zeek.mysql.enabled} == true and ${kubernetes.hints.zeek.enabled} == true + config_version: 2 + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: zeek.mysql + type: logs + interval: 10s + request.method: POST + request.transforms: + - set: + target: url.params.search + value: search sourcetype="mysql-*" | streamstats max(_indextime) AS max_indextime + - set: + target: url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: https://server.example.com:8089/services/search/jobs/export + response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - zeek-mysql + - condition: ${kubernetes.hints.zeek.notice.enabled} == true and ${kubernetes.hints.zeek.enabled} == true + config_version: 2 + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: zeek.notice + type: logs + interval: 10s + request.method: POST + request.transforms: + - set: + target: url.params.search + value: search sourcetype="notice-*" | streamstats max(_indextime) AS max_indextime + - set: + target: url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: 
https://server.example.com:8089/services/search/jobs/export + response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - zeek-notice + - condition: ${kubernetes.hints.zeek.ntlm.enabled} == true and ${kubernetes.hints.zeek.enabled} == true + config_version: 2 + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: zeek.ntlm + type: logs + interval: 10s + request.method: POST + request.transforms: + - set: + target: url.params.search + value: search sourcetype="ntlm-*" | streamstats max(_indextime) AS max_indextime + - set: + target: url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: https://server.example.com:8089/services/search/jobs/export + response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - zeek-ntlm + - condition: ${kubernetes.hints.zeek.ntp.enabled} == true and ${kubernetes.hints.zeek.enabled} == true + config_version: 2 + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: zeek.ntp + type: logs + interval: 10s + request.method: POST + request.transforms: + - set: + target: url.params.search + value: search sourcetype="ntp-*" | streamstats max(_indextime) AS max_indextime + - set: + target: url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: https://server.example.com:8089/services/search/jobs/export + response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - zeek-ntp + - condition: ${kubernetes.hints.zeek.ocsp.enabled} == true and ${kubernetes.hints.zeek.enabled} == true + config_version: 2 + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: zeek.ocsp + type: logs + interval: 10s + request.method: POST + request.transforms: + - set: + target: url.params.search + value: search sourcetype="ocsp-*" | streamstats max(_indextime) AS max_indextime + - set: + target: url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: https://server.example.com:8089/services/search/jobs/export + response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - zeek-ocsp + - condition: ${kubernetes.hints.zeek.pe.enabled} == true and ${kubernetes.hints.zeek.enabled} == true + config_version: 2 + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: zeek.pe + type: logs + interval: 10s + request.method: POST + request.transforms: + - set: + 
target: url.params.search + value: search sourcetype="pe-*" | streamstats max(_indextime) AS max_indextime + - set: + target: url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: https://server.example.com:8089/services/search/jobs/export + response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - zeek-pe + - condition: ${kubernetes.hints.zeek.radius.enabled} == true and ${kubernetes.hints.zeek.enabled} == true + config_version: 2 + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: zeek.radius + type: logs + interval: 10s + request.method: POST + request.transforms: + - set: + target: url.params.search + value: search sourcetype="radius-*" | streamstats max(_indextime) AS max_indextime + - set: + target: url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: https://server.example.com:8089/services/search/jobs/export + response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - zeek-radius + - condition: ${kubernetes.hints.zeek.rdp.enabled} == true and ${kubernetes.hints.zeek.enabled} == true + config_version: 2 + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: zeek.rdp + type: logs + interval: 10s + request.method: POST + request.transforms: + - set: + target: url.params.search + value: search sourcetype="rdp-*" | streamstats max(_indextime) AS max_indextime + - set: + target: url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: https://server.example.com:8089/services/search/jobs/export + response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - zeek-rdp + - condition: ${kubernetes.hints.zeek.rfb.enabled} == true and ${kubernetes.hints.zeek.enabled} == true + config_version: 2 + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: zeek.rfb + type: logs + interval: 10s + request.method: POST + request.transforms: + - set: + target: url.params.search + value: search sourcetype="rfb-*" | streamstats max(_indextime) AS max_indextime + - set: + target: url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: https://server.example.com:8089/services/search/jobs/export + 
response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - zeek-rfb + - condition: ${kubernetes.hints.zeek.signature.enabled} == true and ${kubernetes.hints.zeek.enabled} == true + config_version: 2 + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: zeek.signature + type: logs + interval: 10s + request.method: POST + request.transforms: + - set: + target: url.params.search + value: search sourcetype="signature-*" | streamstats max(_indextime) AS max_indextime + - set: + target: url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: https://server.example.com:8089/services/search/jobs/export + response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - zeek-signature + - condition: ${kubernetes.hints.zeek.sip.enabled} == true and ${kubernetes.hints.zeek.enabled} == true + config_version: 2 + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: zeek.sip + type: logs + interval: 10s + request.method: POST + request.transforms: + - set: + target: url.params.search + value: search sourcetype="sip-*" | streamstats max(_indextime) AS max_indextime + - set: + target: url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: https://server.example.com:8089/services/search/jobs/export + response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - zeek-sip + - condition: ${kubernetes.hints.zeek.smb_cmd.enabled} == true and ${kubernetes.hints.zeek.enabled} == true + config_version: 2 + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: zeek.smb_cmd + type: logs + interval: 10s + request.method: POST + request.transforms: + - set: + target: url.params.search + value: search sourcetype="smb_cmd-*" | streamstats max(_indextime) AS max_indextime + - set: + target: url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: https://server.example.com:8089/services/search/jobs/export + response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - zeek-smb-cmd + - condition: ${kubernetes.hints.zeek.smb_files.enabled} == true and ${kubernetes.hints.zeek.enabled} == true + config_version: 2 + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: zeek.smb_files + type: logs + interval: 10s + request.method: POST + request.transforms: + - set: + target: 
url.params.search + value: search sourcetype="smb_files-*" | streamstats max(_indextime) AS max_indextime + - set: + target: url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: https://server.example.com:8089/services/search/jobs/export + response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - zeek-smb-files + - condition: ${kubernetes.hints.zeek.smb_mapping.enabled} == true and ${kubernetes.hints.zeek.enabled} == true + config_version: 2 + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: zeek.smb_mapping + type: logs + interval: 10s + request.method: POST + request.transforms: + - set: + target: url.params.search + value: search sourcetype="smb_mapping-*" | streamstats max(_indextime) AS max_indextime + - set: + target: url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: https://server.example.com:8089/services/search/jobs/export + response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - zeek-smb-mapping + - condition: ${kubernetes.hints.zeek.smtp.enabled} == true and ${kubernetes.hints.zeek.enabled} == true + config_version: 2 + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: zeek.smtp + type: logs + interval: 10s + request.method: POST + request.transforms: + - set: + target: url.params.search + value: search sourcetype="smtp-*" | streamstats max(_indextime) AS max_indextime + - set: + target: url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: https://server.example.com:8089/services/search/jobs/export + response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - zeek-smtp + - condition: ${kubernetes.hints.zeek.snmp.enabled} == true and ${kubernetes.hints.zeek.enabled} == true + config_version: 2 + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: zeek.snmp + type: logs + interval: 10s + request.method: POST + request.transforms: + - set: + target: url.params.search + value: search sourcetype="snmp-*" | streamstats max(_indextime) AS max_indextime + - set: + target: url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: https://server.example.com:8089/services/search/jobs/export
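+ # Note: every zeek.* stream in this template repeats the httpjson polling
+ # pattern visible above. The cursor keeps the newest _indextime returned by
+ # the previous Splunk search, the next poll sends it back as index_earliest
+ # (falling back to now-10s on the very first request), index_latest is pinned
+ # to the current time, and the exported x-ndjson rows are split on
+ # body.result._raw so that each raw Zeek log line becomes a single event.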
+ response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - zeek-snmp + - condition: ${kubernetes.hints.zeek.socks.enabled} == true and ${kubernetes.hints.zeek.enabled} == true + config_version: 2 + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: zeek.socks + type: logs + interval: 10s + request.method: POST + request.transforms: + - set: + target: url.params.search + value: search sourcetype="socks-*" | streamstats max(_indextime) AS max_indextime + - set: + target: url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: https://server.example.com:8089/services/search/jobs/export + response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - zeek-socks + - condition: ${kubernetes.hints.zeek.ssh.enabled} == true and ${kubernetes.hints.zeek.enabled} == true + config_version: 2 + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: zeek.ssh + type: logs + interval: 10s + request.method: POST + request.transforms: + - set: + target: url.params.search + value: search sourcetype="ssh-*" | streamstats max(_indextime) AS max_indextime + - set: + target: url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: https://server.example.com:8089/services/search/jobs/export + response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - zeek-ssh + - condition: ${kubernetes.hints.zeek.ssl.enabled} == true and ${kubernetes.hints.zeek.enabled} == true + config_version: 2 + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: zeek.ssl + type: logs + interval: 10s + request.method: POST + request.transforms: + - set: + target: url.params.search + value: search sourcetype="ssl-*" | streamstats max(_indextime) AS max_indextime + - set: + target: url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: https://server.example.com:8089/services/search/jobs/export + response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - zeek-ssl + - condition: ${kubernetes.hints.zeek.stats.enabled} == true and ${kubernetes.hints.zeek.enabled} == true + config_version: 2 + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: zeek.stats + type: logs + interval: 10s + request.method: POST + request.transforms: + - set: + target: url.params.search + value: search 
sourcetype="stats-*" | streamstats max(_indextime) AS max_indextime + - set: + target: url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: https://server.example.com:8089/services/search/jobs/export + response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - zeek-stats + - condition: ${kubernetes.hints.zeek.syslog.enabled} == true and ${kubernetes.hints.zeek.enabled} == true + config_version: 2 + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: zeek.syslog + type: logs + interval: 10s + request.method: POST + request.transforms: + - set: + target: url.params.search + value: search sourcetype="syslog-*" | streamstats max(_indextime) AS max_indextime + - set: + target: url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: https://server.example.com:8089/services/search/jobs/export + response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - zeek-syslog + - condition: ${kubernetes.hints.zeek.traceroute.enabled} == true and ${kubernetes.hints.zeek.enabled} == true + config_version: 2 + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: zeek.traceroute + type: logs + interval: 10s + request.method: POST + request.transforms: + - set: + target: url.params.search + value: search sourcetype="traceroute-*" | streamstats max(_indextime) AS max_indextime + - set: + target: url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: https://server.example.com:8089/services/search/jobs/export + response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - zeek-traceroute + - condition: ${kubernetes.hints.zeek.tunnel.enabled} == true and ${kubernetes.hints.zeek.enabled} == true + config_version: 2 + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: zeek.tunnel + type: logs + interval: 10s + request.method: POST + request.transforms: + - set: + target: url.params.search + value: search sourcetype="tunnel-*" | streamstats max(_indextime) AS max_indextime + - set: + target: url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: https://server.example.com:8089/services/search/jobs/export + 
response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - zeek-tunnel + - condition: ${kubernetes.hints.zeek.weird.enabled} == true and ${kubernetes.hints.zeek.enabled} == true + config_version: 2 + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: zeek.weird + type: logs + interval: 10s + request.method: POST + request.transforms: + - set: + target: url.params.search + value: search sourcetype="weird-*" | streamstats max(_indextime) AS max_indextime + - set: + target: url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: https://server.example.com:8089/services/search/jobs/export + response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - zeek-weird + - condition: ${kubernetes.hints.zeek.x509.enabled} == true and ${kubernetes.hints.zeek.enabled} == true + config_version: 2 + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: zeek.x509 + type: logs + interval: 10s + request.method: POST + request.transforms: + - set: + target: url.params.search + value: search sourcetype="x509-*" | streamstats max(_indextime) AS max_indextime + - set: + target: url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: https://server.example.com:8089/services/search/jobs/export + response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - zeek-x509 + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/zookeeper.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/zookeeper.yml new file mode 100644 index 00000000000..5199734c315 --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/zookeeper.yml @@ -0,0 +1,54 @@ +inputs: + - name: zookeeper/metrics-zookeeper + type: zookeeper/metrics + use_output: default + streams: + - condition: ${kubernetes.hints.zookeeper.connection.enabled} == true or ${kubernetes.hints.zookeeper.enabled} == true + data_stream: + dataset: zookeeper.connection + type: metrics + hosts: + - ${kubernetes.hints.zookeeper.connection.host|'localhost:2181'} + metricsets: + - connection + period: ${kubernetes.hints.zookeeper.connection.period|'10s'} + - condition: ${kubernetes.hints.zookeeper.mntr.enabled} == true or ${kubernetes.hints.zookeeper.enabled} == true + data_stream: + dataset: zookeeper.mntr + type: metrics + hosts: + - ${kubernetes.hints.zookeeper.mntr.host|'localhost:2181'} + metricsets: + - mntr + period: ${kubernetes.hints.zookeeper.mntr.period|'10s'} + - condition: ${kubernetes.hints.zookeeper.server.enabled} == true or ${kubernetes.hints.zookeeper.enabled} == true + data_stream: + dataset: zookeeper.server + type: metrics + hosts: + - ${kubernetes.hints.zookeeper.server.host|'localhost:2181'} + 
metricsets: + - server + period: ${kubernetes.hints.zookeeper.server.period|'10s'} + data_stream.namespace: default + - name: filestream-zookeeper + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.zookeeper.container_logs.enabled} == true + data_stream: + dataset: kubernetes.container_logs + type: logs + exclude_files: [] + exclude_lines: [] + parsers: + - container: + format: auto + stream: all + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + prospector: + scanner: + symlinks: true + tags: [] + data_stream.namespace: default From a7c48efc51da0abae7e108720b519ce1da1c3203 Mon Sep 17 00:00:00 2001 From: Victor Martinez Date: Wed, 14 Sep 2022 17:51:19 +0100 Subject: [PATCH 125/180] ci: force GO_VERSION (#1204) --- .ci/Jenkinsfile | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.ci/Jenkinsfile b/.ci/Jenkinsfile index 9fb7021784a..c374bfeb0ef 100644 --- a/.ci/Jenkinsfile +++ b/.ci/Jenkinsfile @@ -61,6 +61,8 @@ pipeline { setEnvVar('K8S_CHANGES', isGitRegionMatch(patterns: [ '(^deploy/kubernetes/.*|^version/docs/version.asciidoc|.ci/Jenkinsfile)' ], shouldMatchAll: false).toString()) setEnvVar('EXT_WINDOWS_CHANGES', isGitRegionMatch(patterns: [ '.ci/Jenkinsfile' ], shouldMatchAll: false).toString()) setEnvVar('EXT_M1_CHANGES', isGitRegionMatch(patterns: [ '.ci/Jenkinsfile' ], shouldMatchAll: false).toString()) + // set the GO_VERSION env variable with the go version to be used in withMageEnv + setEnvVar('GO_VERSION', readFile(file: '.go-version')?.trim()) } } } From 270295f4e334931df41b47e6607c238a8d6e4b44 Mon Sep 17 00:00:00 2001 From: Aleksandr Maus Date: Wed, 14 Sep 2022 14:27:10 -0400 Subject: [PATCH 126/180] Fix whitespaces in vault_darwin.c (#1206) --- internal/pkg/agent/vault/vault_darwin.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/internal/pkg/agent/vault/vault_darwin.c b/internal/pkg/agent/vault/vault_darwin.c index c2bb85bb354..b5c1777dac1 100644 --- a/internal/pkg/agent/vault/vault_darwin.c +++ b/internal/pkg/agent/vault/vault_darwin.c @@ -209,10 +209,10 @@ OSStatus RemoveKeychainItem(SecKeychainRef keychain, const char *name, const cha char* GetOSStatusMessage(OSStatus status) { CFStringRef s = SecCopyErrorMessageString(status, NULL); char *p; - int n; - n = CFStringGetLength(s)*8; - p = malloc(n); - CFStringGetCString(s, p, n, kCFStringEncodingUTF8); + int n; + n = CFStringGetLength(s)*8; + p = malloc(n); + CFStringGetCString(s, p, n, kCFStringEncodingUTF8); CFRelease(s); - return p; + return p; } From 5a0ba4d9d4f8cd03a1c32e20f6e63640daa532e1 Mon Sep 17 00:00:00 2001 From: Elastic Machine Date: Mon, 19 Sep 2022 05:09:03 -0400 Subject: [PATCH 127/180] Update kubernetes templates for elastic-agent [templates.d] (#1231) --- .../templates.d/activemq.yml | 90 +++++++++--------- .../templates.d/apache.yml | 30 +++--- .../templates.d/cassandra.yml | 54 +++++------ .../templates.d/cloud_security_posture.yml | 93 ------------------- .../templates.d/cockroachdb.yml | 42 ++++----- .../templates.d/cyberarkpas.yml | 46 ++++----- .../templates.d/fireeye.yml | 34 +++---- .../templates.d/haproxy.yml | 32 +++---- .../templates.d/infoblox_nios.yml | 12 +++ .../templates.d/kafka.yml | 56 +++++------ .../templates.d/mongodb.yml | 44 ++++----- .../templates.d/nginx.yml | 30 +++--- .../templates.d/osquery.yml | 23 ----- .../templates.d/osquery_manager.yml | 33 ------- .../templates.d/qnap_nas.yml | 38 ++++---- .../templates.d/sentinel_one.yml | 42 ++++----- .../templates.d/symantec_endpoint.yml | 38 
++++---- 17 files changed, 300 insertions(+), 437 deletions(-) delete mode 100644 deploy/kubernetes/elastic-agent-standalone/templates.d/cloud_security_posture.yml delete mode 100644 deploy/kubernetes/elastic-agent-standalone/templates.d/osquery.yml delete mode 100644 deploy/kubernetes/elastic-agent-standalone/templates.d/osquery_manager.yml diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/activemq.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/activemq.yml index 8177cd731d2..007060a5ac0 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/activemq.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/activemq.yml @@ -1,4 +1,49 @@ inputs: + - name: filestream-activemq + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.activemq.audit.enabled} == true or ${kubernetes.hints.activemq.enabled} == true + data_stream: + dataset: activemq.audit + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.activemq.audit.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - activemq-audit + - condition: ${kubernetes.hints.activemq.log.enabled} == true or ${kubernetes.hints.activemq.enabled} == true + data_stream: + dataset: activemq.log + type: logs + exclude_files: + - .gz$ + multiline: + match: after + negate: true + pattern: '^\d{4}-\d{2}-\d{2} ' + parsers: + - container: + format: auto + stream: ${kubernetes.hints.activemq.log.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - activemq-log + data_stream.namespace: default - name: activemq/metrics-activemq type: activemq/metrics use_output: default @@ -49,48 +94,3 @@ inputs: - activemq-topic username: ${kubernetes.hints.activemq.topic.username|'admin'} data_stream.namespace: default - - name: filestream-activemq - type: filestream - use_output: default - streams: - - condition: ${kubernetes.hints.activemq.audit.enabled} == true or ${kubernetes.hints.activemq.enabled} == true - data_stream: - dataset: activemq.audit - type: logs - exclude_files: - - .gz$ - parsers: - - container: - format: auto - stream: ${kubernetes.hints.activemq.audit.stream|'all'} - paths: - - /var/log/containers/*${kubernetes.hints.container_id}.log - prospector: - scanner: - symlinks: true - tags: - - forwarded - - activemq-audit - - condition: ${kubernetes.hints.activemq.log.enabled} == true or ${kubernetes.hints.activemq.enabled} == true - data_stream: - dataset: activemq.log - type: logs - exclude_files: - - .gz$ - multiline: - match: after - negate: true - pattern: '^\d{4}-\d{2}-\d{2} ' - parsers: - - container: - format: auto - stream: ${kubernetes.hints.activemq.log.stream|'all'} - paths: - - /var/log/containers/*${kubernetes.hints.container_id}.log - prospector: - scanner: - symlinks: true - tags: - - forwarded - - activemq-log - data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/apache.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/apache.yml index bdf487d2d5c..a6e461a5363 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/apache.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/apache.yml @@ -1,19 +1,4 @@ inputs: - - name: apache/metrics-apache - type: apache/metrics - use_output: default - streams: - - condition: 
${kubernetes.hints.apache.status.enabled} == true or ${kubernetes.hints.apache.enabled} == true - data_stream: - dataset: apache.status - type: metrics - hosts: - - ${kubernetes.hints.apache.status.host|'http://127.0.0.1'} - metricsets: - - status - period: ${kubernetes.hints.apache.status.period|'30s'} - server_status_path: /server-status - data_stream.namespace: default - name: filestream-apache type: filestream use_output: default @@ -132,3 +117,18 @@ inputs: - forwarded - apache-error data_stream.namespace: default + - name: apache/metrics-apache + type: apache/metrics + use_output: default + streams: + - condition: ${kubernetes.hints.apache.status.enabled} == true or ${kubernetes.hints.apache.enabled} == true + data_stream: + dataset: apache.status + type: metrics + hosts: + - ${kubernetes.hints.apache.status.host|'http://127.0.0.1'} + metricsets: + - status + period: ${kubernetes.hints.apache.status.period|'30s'} + server_status_path: /server-status + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/cassandra.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/cassandra.yml index 296b330c807..bce4edf635c 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/cassandra.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/cassandra.yml @@ -1,4 +1,31 @@ inputs: + - name: filestream-cassandra + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.cassandra.log.enabled} == true or ${kubernetes.hints.cassandra.enabled} == true + data_stream: + dataset: cassandra.log + type: logs + exclude_files: + - .gz$ + multiline: + match: after + negate: true + pattern: ^([A-Z]) + parsers: + - container: + format: auto + stream: ${kubernetes.hints.cassandra.log.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - cassandra-systemlogs + data_stream.namespace: default - name: jolokia/metrics-cassandra type: jolokia/metrics use_output: default @@ -298,30 +325,3 @@ inputs: period: ${kubernetes.hints.cassandra.metrics.period|'10s'} username: ${kubernetes.hints.cassandra.metrics.username|'admin'} data_stream.namespace: default - - name: filestream-cassandra - type: filestream - use_output: default - streams: - - condition: ${kubernetes.hints.cassandra.log.enabled} == true or ${kubernetes.hints.cassandra.enabled} == true - data_stream: - dataset: cassandra.log - type: logs - exclude_files: - - .gz$ - multiline: - match: after - negate: true - pattern: ^([A-Z]) - parsers: - - container: - format: auto - stream: ${kubernetes.hints.cassandra.log.stream|'all'} - paths: - - /var/log/containers/*${kubernetes.hints.container_id}.log - prospector: - scanner: - symlinks: true - tags: - - forwarded - - cassandra-systemlogs - data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/cloud_security_posture.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/cloud_security_posture.yml deleted file mode 100644 index bbc867294c7..00000000000 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/cloud_security_posture.yml +++ /dev/null @@ -1,93 +0,0 @@ -inputs: - - name: cloudbeat/cis_k8s-cloud_security_posture - type: cloudbeat/cis_k8s - use_output: default - streams: - - condition: ${kubernetes.hints.cloud_security_posture.findings.enabled} == true or ${kubernetes.hints.cloud_security_posture.enabled} == true - data_stream: - dataset: 
cloud_security_posture.findings - type: logs - evaluator: - decision_logs: false - fetchers: - - name: kube-api - - directory: /hostfs - name: process - processes: - etcd: null - kube-apiserver: null - kube-controller: null - kube-scheduler: null - kubelet: - config-file-arguments: - - config - - name: file-system - patterns: - - /hostfs/etc/kubernetes/scheduler.conf - - /hostfs/etc/kubernetes/controller-manager.conf - - /hostfs/etc/kubernetes/admin.conf - - /hostfs/etc/kubernetes/kubelet.conf - - /hostfs/etc/kubernetes/manifests/etcd.yaml - - /hostfs/etc/kubernetes/manifests/kube-apiserver.yaml - - /hostfs/etc/kubernetes/manifests/kube-controller-manager.yaml - - /hostfs/etc/kubernetes/manifests/kube-scheduler.yaml - - /hostfs/etc/systemd/system/kubelet.service.d/10-kubeadm.conf - - /hostfs/etc/kubernetes/pki/* - - /hostfs/var/lib/kubelet/config.yaml - - /hostfs/var/lib/etcd - - /hostfs/etc/kubernetes/pki - name: Findings - period: 4h - processors: - - add_cluster_id: null - data_stream.namespace: default - - name: cloudbeat/cis_eks-cloud_security_posture - type: cloudbeat/cis_eks - use_output: default - streams: - - condition: ${kubernetes.hints.cloud_security_posture.findings.enabled} == true and ${kubernetes.hints.cloud_security_posture.enabled} == true - data_stream: - dataset: cloud_security_posture.findings - type: logs - evaluator: - decision_logs: false - fetchers: - - name: kube-api - - directory: /hostfs - name: process - processes: - kubelet: - config-file-arguments: - - config - - name: aws-ecr - - name: aws-elb - - name: file-system - patterns: - - /hostfs/etc/kubernetes/kubelet/kubelet-config.json - - /hostfs/var/lib/kubelet/kubeconfig - name: Findings - period: 4h - processors: - - add_cluster_id: null - data_stream.namespace: default - - name: filestream-cloud_security_posture - type: filestream - use_output: default - streams: - - condition: ${kubernetes.hints.cloud_security_posture.container_logs.enabled} == true - data_stream: - dataset: kubernetes.container_logs - type: logs - exclude_files: [] - exclude_lines: [] - parsers: - - container: - format: auto - stream: all - paths: - - /var/log/containers/*${kubernetes.hints.container_id}.log - prospector: - scanner: - symlinks: true - tags: [] - data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/cockroachdb.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/cockroachdb.yml index 531706b7345..3e55b02794d 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/cockroachdb.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/cockroachdb.yml @@ -1,25 +1,4 @@ inputs: - - name: filestream-cockroachdb - type: filestream - use_output: default - streams: - - condition: ${kubernetes.hints.cockroachdb.container_logs.enabled} == true - data_stream: - dataset: kubernetes.container_logs - type: logs - exclude_files: [] - exclude_lines: [] - parsers: - - container: - format: auto - stream: all - paths: - - /var/log/containers/*${kubernetes.hints.container_id}.log - prospector: - scanner: - symlinks: true - tags: [] - data_stream.namespace: default - name: prometheus/metrics-cockroachdb type: prometheus/metrics use_output: default @@ -42,3 +21,24 @@ inputs: use_types: true username: null data_stream.namespace: default + - name: filestream-cockroachdb + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.cockroachdb.container_logs.enabled} == true + data_stream: + dataset: kubernetes.container_logs + type: logs + exclude_files: 
[] + exclude_lines: [] + parsers: + - container: + format: auto + stream: all + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + prospector: + scanner: + symlinks: true + tags: [] + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/cyberarkpas.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/cyberarkpas.yml index 4dc9361aa41..fc8f72c6206 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/cyberarkpas.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/cyberarkpas.yml @@ -1,30 +1,22 @@ inputs: - - name: filestream-cyberarkpas - type: filestream + - name: tcp-cyberarkpas + type: tcp use_output: default streams: - - condition: ${kubernetes.hints.cyberarkpas.audit.enabled} == true and ${kubernetes.hints.cyberarkpas.enabled} == true + - condition: ${kubernetes.hints.cyberarkpas.audit.enabled} == true or ${kubernetes.hints.cyberarkpas.enabled} == true data_stream: dataset: cyberarkpas.audit type: logs - exclude_files: - - .gz$ - parsers: - - container: - format: auto - stream: ${kubernetes.hints.cyberarkpas.audit.stream|'all'} - paths: null + host: localhost:9301 processors: - add_locale: null - prospector: - scanner: - symlinks: true tags: - - forwarded - cyberarkpas-audit + - forwarded + tcp: null data_stream.namespace: default - - name: tcp-cyberarkpas - type: tcp + - name: udp-cyberarkpas + type: udp use_output: default streams: - condition: ${kubernetes.hints.cyberarkpas.audit.enabled} == true or ${kubernetes.hints.cyberarkpas.enabled} == true @@ -37,21 +29,29 @@ inputs: tags: - cyberarkpas-audit - forwarded - tcp: null + udp: null data_stream.namespace: default - - name: udp-cyberarkpas - type: udp + - name: filestream-cyberarkpas + type: filestream use_output: default streams: - - condition: ${kubernetes.hints.cyberarkpas.audit.enabled} == true or ${kubernetes.hints.cyberarkpas.enabled} == true + - condition: ${kubernetes.hints.cyberarkpas.audit.enabled} == true and ${kubernetes.hints.cyberarkpas.enabled} == true data_stream: dataset: cyberarkpas.audit type: logs - host: localhost:9301 + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.cyberarkpas.audit.stream|'all'} + paths: null processors: - add_locale: null + prospector: + scanner: + symlinks: true tags: - - cyberarkpas-audit - forwarded - udp: null + - cyberarkpas-audit data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/fireeye.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/fireeye.yml index 8e226e0d925..44b8074cb5a 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/fireeye.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/fireeye.yml @@ -1,21 +1,4 @@ inputs: - - name: tcp-fireeye - type: tcp - use_output: default - streams: - - condition: ${kubernetes.hints.fireeye.nx.enabled} == true or ${kubernetes.hints.fireeye.enabled} == true - data_stream: - dataset: fireeye.nx - type: logs - fields_under_root: true - host: localhost:9523 - processors: - - add_locale: null - tags: - - fireeye-nx - - forwarded - tcp: null - data_stream.namespace: default - name: filestream-fireeye type: filestream use_output: default @@ -57,3 +40,20 @@ inputs: - forwarded udp: null data_stream.namespace: default + - name: tcp-fireeye + type: tcp + use_output: default + streams: + - condition: ${kubernetes.hints.fireeye.nx.enabled} == true or ${kubernetes.hints.fireeye.enabled} == true + data_stream: + dataset: 
fireeye.nx + type: logs + fields_under_root: true + host: localhost:9523 + processors: + - add_locale: null + tags: + - fireeye-nx + - forwarded + tcp: null + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/haproxy.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/haproxy.yml index 0f1debdee34..cff5d5821aa 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/haproxy.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/haproxy.yml @@ -1,4 +1,20 @@ inputs: + - name: syslog-haproxy + type: syslog + use_output: default + streams: + - condition: ${kubernetes.hints.haproxy.log.enabled} == true or ${kubernetes.hints.haproxy.enabled} == true + data_stream: + dataset: haproxy.log + type: logs + processors: + - add_locale: null + protocol.udp: + host: localhost:9001 + tags: + - forwarded + - haproxy-log + data_stream.namespace: default - name: haproxy/metrics-haproxy type: haproxy/metrics use_output: default @@ -50,19 +66,3 @@ inputs: tags: - haproxy-log data_stream.namespace: default - - name: syslog-haproxy - type: syslog - use_output: default - streams: - - condition: ${kubernetes.hints.haproxy.log.enabled} == true or ${kubernetes.hints.haproxy.enabled} == true - data_stream: - dataset: haproxy.log - type: logs - processors: - - add_locale: null - protocol.udp: - host: localhost:9001 - tags: - - forwarded - - haproxy-log - data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/infoblox_nios.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/infoblox_nios.yml index ad76a72b86b..d260fead6a6 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/infoblox_nios.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/infoblox_nios.yml @@ -9,6 +9,10 @@ inputs: type: logs exclude_files: - .gz$ + fields: + _conf: + tz_offset: local + fields_under_root: true parsers: - container: format: auto @@ -31,6 +35,10 @@ inputs: data_stream: dataset: infoblox_nios.log type: logs + fields: + _conf: + tz_offset: local + fields_under_root: true host: localhost:9027 tags: - forwarded @@ -44,6 +52,10 @@ inputs: data_stream: dataset: infoblox_nios.log type: logs + fields: + _conf: + tz_offset: local + fields_under_root: true host: localhost:9028 tags: - forwarded diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/kafka.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/kafka.yml index c35cff8619d..b79eebbcfb0 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/kafka.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/kafka.yml @@ -1,4 +1,32 @@ inputs: + - name: filestream-kafka + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.kafka.log.enabled} == true or ${kubernetes.hints.kafka.enabled} == true + data_stream: + dataset: kafka.log + type: logs + exclude_files: + - .gz$ + multiline: + match: after + negate: true + pattern: ^\[ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.kafka.log.stream|'all'} + paths: + - /opt/kafka*/var/log/containers/*${kubernetes.hints.container_id}.log + processors: + - add_locale: null + prospector: + scanner: + symlinks: true + tags: + - kafka-log + data_stream.namespace: default - name: kafka/metrics-kafka type: kafka/metrics use_output: default @@ -31,31 +59,3 @@ inputs: - partition period: ${kubernetes.hints.kafka.partition.period|'10s'} data_stream.namespace: default - - name: filestream-kafka - type: filestream - 
use_output: default - streams: - - condition: ${kubernetes.hints.kafka.log.enabled} == true or ${kubernetes.hints.kafka.enabled} == true - data_stream: - dataset: kafka.log - type: logs - exclude_files: - - .gz$ - multiline: - match: after - negate: true - pattern: ^\[ - parsers: - - container: - format: auto - stream: ${kubernetes.hints.kafka.log.stream|'all'} - paths: - - /opt/kafka*/var/log/containers/*${kubernetes.hints.container_id}.log - processors: - - add_locale: null - prospector: - scanner: - symlinks: true - tags: - - kafka-log - data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/mongodb.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/mongodb.yml index bf47b9628da..ece2d4439eb 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/mongodb.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/mongodb.yml @@ -1,4 +1,26 @@ inputs: + - name: filestream-mongodb + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.mongodb.log.enabled} == true or ${kubernetes.hints.mongodb.enabled} == true + data_stream: + dataset: mongodb.log + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.mongodb.log.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + prospector: + scanner: + symlinks: true + tags: + - mongodb-logs + data_stream.namespace: default - name: mongodb/metrics-mongodb type: mongodb/metrics use_output: default @@ -49,25 +71,3 @@ inputs: - status period: ${kubernetes.hints.mongodb.status.period|'10s'} data_stream.namespace: default - - name: filestream-mongodb - type: filestream - use_output: default - streams: - - condition: ${kubernetes.hints.mongodb.log.enabled} == true or ${kubernetes.hints.mongodb.enabled} == true - data_stream: - dataset: mongodb.log - type: logs - exclude_files: - - .gz$ - parsers: - - container: - format: auto - stream: ${kubernetes.hints.mongodb.log.stream|'all'} - paths: - - /var/log/containers/*${kubernetes.hints.container_id}.log - prospector: - scanner: - symlinks: true - tags: - - mongodb-logs - data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/nginx.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/nginx.yml index f0c166bbfbb..a9b6693e372 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/nginx.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/nginx.yml @@ -1,4 +1,19 @@ inputs: + - name: nginx/metrics-nginx + type: nginx/metrics + use_output: default + streams: + - condition: ${kubernetes.hints.nginx.stubstatus.enabled} == true or ${kubernetes.hints.nginx.enabled} == true + data_stream: + dataset: nginx.stubstatus + type: metrics + hosts: + - ${kubernetes.hints.nginx.stubstatus.host|'http://127.0.0.1:80'} + metricsets: + - stubstatus + period: ${kubernetes.hints.nginx.stubstatus.period|'10s'} + server_status_path: /nginx_status + data_stream.namespace: default - name: filestream-nginx type: filestream use_output: default @@ -125,18 +140,3 @@ inputs: - forwarded - nginx-error data_stream.namespace: default - - name: nginx/metrics-nginx - type: nginx/metrics - use_output: default - streams: - - condition: ${kubernetes.hints.nginx.stubstatus.enabled} == true or ${kubernetes.hints.nginx.enabled} == true - data_stream: - dataset: nginx.stubstatus - type: metrics - hosts: - - ${kubernetes.hints.nginx.stubstatus.host|'http://127.0.0.1:80'} - metricsets: - - 
stubstatus - period: ${kubernetes.hints.nginx.stubstatus.period|'10s'} - server_status_path: /nginx_status - data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/osquery.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/osquery.yml deleted file mode 100644 index 6ebd2f12c46..00000000000 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/osquery.yml +++ /dev/null @@ -1,23 +0,0 @@ -inputs: - - name: filestream-osquery - type: filestream - use_output: default - streams: - - condition: ${kubernetes.hints.osquery.result.enabled} == true or ${kubernetes.hints.osquery.enabled} == true - data_stream: - dataset: osquery.result - type: logs - exclude_files: - - .gz$ - parsers: - - container: - format: auto - stream: ${kubernetes.hints.osquery.result.stream|'all'} - paths: - - /var/log/containers/*${kubernetes.hints.container_id}.log - prospector: - scanner: - symlinks: true - tags: - - osquery - data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/osquery_manager.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/osquery_manager.yml deleted file mode 100644 index 6620de9c7de..00000000000 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/osquery_manager.yml +++ /dev/null @@ -1,33 +0,0 @@ -inputs: - - name: osquery-osquery_manager - type: osquery - use_output: default - streams: - - condition: ${kubernetes.hints.osquery_manager.result.enabled} == true or ${kubernetes.hints.osquery_manager.enabled} == true - data_stream: - dataset: osquery_manager.result - type: logs - id: null - query: null - data_stream.namespace: default - - name: filestream-osquery_manager - type: filestream - use_output: default - streams: - - condition: ${kubernetes.hints.osquery_manager.container_logs.enabled} == true - data_stream: - dataset: kubernetes.container_logs - type: logs - exclude_files: [] - exclude_lines: [] - parsers: - - container: - format: auto - stream: all - paths: - - /var/log/containers/*${kubernetes.hints.container_id}.log - prospector: - scanner: - symlinks: true - tags: [] - data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/qnap_nas.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/qnap_nas.yml index a7358abd781..546faa79901 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/qnap_nas.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/qnap_nas.yml @@ -1,4 +1,23 @@ inputs: + - name: udp-qnap_nas + type: udp + use_output: default + streams: + - condition: ${kubernetes.hints.qnap_nas.log.enabled} == true and ${kubernetes.hints.qnap_nas.enabled} == true + data_stream: + dataset: qnap_nas.log + type: logs + host: localhost:9301 + processors: + - add_locale: null + - add_fields: + fields: + tz_offset: local + target: _tmp + tags: + - qnap-nas + - forwarded + data_stream.namespace: default - name: filestream-qnap_nas type: filestream use_output: default @@ -39,22 +58,3 @@ inputs: - qnap-nas - forwarded data_stream.namespace: default - - name: udp-qnap_nas - type: udp - use_output: default - streams: - - condition: ${kubernetes.hints.qnap_nas.log.enabled} == true and ${kubernetes.hints.qnap_nas.enabled} == true - data_stream: - dataset: qnap_nas.log - type: logs - host: localhost:9301 - processors: - - add_locale: null - - add_fields: - fields: - tz_offset: local - target: _tmp - tags: - - qnap-nas - - forwarded - data_stream.namespace: default diff --git 
a/deploy/kubernetes/elastic-agent-standalone/templates.d/sentinel_one.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/sentinel_one.yml index dcd117dc994..7c06b222d78 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/sentinel_one.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/sentinel_one.yml @@ -1,25 +1,4 @@ inputs: - - name: filestream-sentinel_one - type: filestream - use_output: default - streams: - - condition: ${kubernetes.hints.sentinel_one.container_logs.enabled} == true - data_stream: - dataset: kubernetes.container_logs - type: logs - exclude_files: [] - exclude_lines: [] - parsers: - - container: - format: auto - stream: all - paths: - - /var/log/containers/*${kubernetes.hints.container_id}.log - prospector: - scanner: - symlinks: true - tags: [] - data_stream.namespace: default - name: httpjson-sentinel_one type: httpjson use_output: default @@ -215,3 +194,24 @@ inputs: - forwarded - sentinel_one-threat data_stream.namespace: default + - name: filestream-sentinel_one + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.sentinel_one.container_logs.enabled} == true + data_stream: + dataset: kubernetes.container_logs + type: logs + exclude_files: [] + exclude_lines: [] + parsers: + - container: + format: auto + stream: all + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + prospector: + scanner: + symlinks: true + tags: [] + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/symantec_endpoint.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/symantec_endpoint.yml index fac3f6cbd93..8e3ca7ce297 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/symantec_endpoint.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/symantec_endpoint.yml @@ -1,4 +1,23 @@ inputs: + - name: udp-symantec_endpoint + type: udp + use_output: default + streams: + - condition: ${kubernetes.hints.symantec_endpoint.log.enabled} == true or ${kubernetes.hints.symantec_endpoint.enabled} == true + data_stream: + dataset: symantec_endpoint.log + type: logs + fields: + _conf: + remove_mapped_fields: false + tz_offset: UTC + fields_under_root: true + host: localhost:9008 + max_message_size: 1 MiB + tags: + - symantec-endpoint-log + - forwarded + data_stream.namespace: default - name: filestream-symantec_endpoint type: filestream use_output: default @@ -46,22 +65,3 @@ inputs: - symantec-endpoint-log - forwarded data_stream.namespace: default - - name: udp-symantec_endpoint - type: udp - use_output: default - streams: - - condition: ${kubernetes.hints.symantec_endpoint.log.enabled} == true or ${kubernetes.hints.symantec_endpoint.enabled} == true - data_stream: - dataset: symantec_endpoint.log - type: logs - fields: - _conf: - remove_mapped_fields: false - tz_offset: UTC - fields_under_root: true - host: localhost:9008 - max_message_size: 1 MiB - tags: - - symantec-endpoint-log - - forwarded - data_stream.namespace: default From a0620af9818d36353241a518d00ba34ab2ff9d11 Mon Sep 17 00:00:00 2001 From: Josh Dover <1813008+joshdover@users.noreply.github.com> Date: Mon, 19 Sep 2022 13:15:45 +0200 Subject: [PATCH 128/180] Use at least warning level for all status logs (#1218) --- CHANGELOG.next.asciidoc | 1 + internal/pkg/core/status/reporter.go | 7 +++---- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 6047c71b3d7..b2fc7413192 100644 --- a/CHANGELOG.next.asciidoc +++ 
b/CHANGELOG.next.asciidoc @@ -114,6 +114,7 @@ - Agent updates will clean up unneeded artifacts. {issue}693[693] {issue}694[694] {pull}752[752] - Use the Elastic Agent configuration directory as the root of the `inputs.d` folder. {issues}663[663] - Fix a panic caused by a race condition when installing the Elastic Agent. {issues}806[806] +- Use at least warning level for all status logs {pull}1218[1218] - Remove fleet event reporter and events from checkin body. {issue}993[993] ==== New features diff --git a/internal/pkg/core/status/reporter.go b/internal/pkg/core/status/reporter.go index 9e0f47a9b56..848a69326e6 100644 --- a/internal/pkg/core/status/reporter.go +++ b/internal/pkg/core/status/reporter.go @@ -250,10 +250,9 @@ func (r *controller) updateStatus() { } func (r *controller) logStatus(status AgentStatusCode, message string) { - logFn := r.log.Infof - if status == Degraded { - logFn = r.log.Warnf - } else if status == Failed { + // Use at least warning level log for all statuses to make sure they are visible in the logs + logFn := r.log.Warnf + if status == Failed { logFn = r.log.Errorf } From ec2816d7ad572c7f9ec41d119ad907d55653cd6a Mon Sep 17 00:00:00 2001 From: Chris Mark Date: Mon, 19 Sep 2022 14:52:20 +0300 Subject: [PATCH 129/180] Update k8s manifests to leverage hints (#1202) --- .../elastic-agent-standalone-kubernetes.yaml | 27 +++++++++++++++++-- ...-agent-standalone-daemonset-configmap.yaml | 2 ++ .../elastic-agent-standalone-daemonset.yaml | 25 +++++++++++++++-- 3 files changed, 50 insertions(+), 4 deletions(-) diff --git a/deploy/kubernetes/elastic-agent-standalone-kubernetes.yaml b/deploy/kubernetes/elastic-agent-standalone-kubernetes.yaml index 9a8ff40e179..d6ce952dadd 100644 --- a/deploy/kubernetes/elastic-agent-standalone-kubernetes.yaml +++ b/deploy/kubernetes/elastic-agent-standalone-kubernetes.yaml @@ -25,6 +25,8 @@ data: providers.kubernetes: node: ${NODE_NAME} scope: node + #Uncomment to enable hints' support + #hints.enabled: true inputs: - name: kubernetes-cluster-metrics condition: ${kubernetes_leaderelection.leader} == true @@ -651,11 +653,24 @@ spec: serviceAccountName: elastic-agent-standalone hostNetwork: true dnsPolicy: ClusterFirstWithHostNet + # Uncomment if using hints feature + #initContainers: + # - name: k8s-templates-downloader + # image: busybox:1.28 + # command: ['sh'] + # args: + # - -c + # - >- + # mkdir -p /etc/elastic-agent/inputs.d && + # wget -O - https://github.com/elastic/elastic-agent/archive/8.3.0.tar.gz | tar xz -C /etc/elastic-agent/inputs.d --strip=5 "elastic-agent-main/deploy/kubernetes/elastic-agent-standalone/templates.d" + # volumeMounts: + # - name: external-inputs + # mountPath: /etc/elastic-agent/inputs.d containers: - name: elastic-agent-standalone image: docker.elastic.co/beats/elastic-agent:8.3.0 args: [ - "-c", "/etc/agent.yml", + "-c", "/etc/elastic-agent/agent.yml", "-e", ] env: @@ -677,6 +692,8 @@ spec: valueFrom: fieldRef: fieldPath: metadata.name + - name: STATE_PATH + value: "/etc/elastic-agent" securityContext: runAsUser: 0 resources: @@ -687,9 +704,12 @@ spec: memory: 400Mi volumeMounts: - name: datastreams - mountPath: /etc/agent.yml + mountPath: /etc/elastic-agent/agent.yml readOnly: true subPath: agent.yml + # Uncomment if using hints feature + #- name: external-inputs + # mountPath: /etc/elastic-agent/inputs.d - name: proc mountPath: /hostfs/proc readOnly: true @@ -722,6 +742,9 @@ spec: configMap: defaultMode: 0640 name: agent-node-datastreams + # Uncomment if using hints feature + #- name: 
external-inputs + # emptyDir: {} - name: proc hostPath: path: /proc diff --git a/deploy/kubernetes/elastic-agent-standalone/elastic-agent-standalone-daemonset-configmap.yaml b/deploy/kubernetes/elastic-agent-standalone/elastic-agent-standalone-daemonset-configmap.yaml index 1e42f94af15..15a24fc3c59 100644 --- a/deploy/kubernetes/elastic-agent-standalone/elastic-agent-standalone-daemonset-configmap.yaml +++ b/deploy/kubernetes/elastic-agent-standalone/elastic-agent-standalone-daemonset-configmap.yaml @@ -25,6 +25,8 @@ data: providers.kubernetes: node: ${NODE_NAME} scope: node + #Uncomment to enable hints' support + #hints.enabled: true inputs: - name: kubernetes-cluster-metrics condition: ${kubernetes_leaderelection.leader} == true diff --git a/deploy/kubernetes/elastic-agent-standalone/elastic-agent-standalone-daemonset.yaml b/deploy/kubernetes/elastic-agent-standalone/elastic-agent-standalone-daemonset.yaml index c4846b8b308..675c68c6dfb 100644 --- a/deploy/kubernetes/elastic-agent-standalone/elastic-agent-standalone-daemonset.yaml +++ b/deploy/kubernetes/elastic-agent-standalone/elastic-agent-standalone-daemonset.yaml @@ -25,11 +25,24 @@ spec: serviceAccountName: elastic-agent-standalone hostNetwork: true dnsPolicy: ClusterFirstWithHostNet + # Uncomment if using hints feature + #initContainers: + # - name: k8s-templates-downloader + # image: busybox:1.28 + # command: ['sh'] + # args: + # - -c + # - >- + # mkdir -p /etc/elastic-agent/inputs.d && + # wget -O - https://github.com/elastic/elastic-agent/archive/%VERSION%.tar.gz | tar xz -C /etc/elastic-agent/inputs.d --strip=5 "elastic-agent-main/deploy/kubernetes/elastic-agent-standalone/templates.d" + # volumeMounts: + # - name: external-inputs + # mountPath: /etc/elastic-agent/inputs.d containers: - name: elastic-agent-standalone image: docker.elastic.co/beats/elastic-agent:%VERSION% args: [ - "-c", "/etc/agent.yml", + "-c", "/etc/elastic-agent/agent.yml", "-e", ] env: @@ -51,6 +64,8 @@ spec: valueFrom: fieldRef: fieldPath: metadata.name + - name: STATE_PATH + value: "/etc/elastic-agent" securityContext: runAsUser: 0 resources: @@ -61,9 +76,12 @@ spec: memory: 400Mi volumeMounts: - name: datastreams - mountPath: /etc/agent.yml + mountPath: /etc/elastic-agent/agent.yml readOnly: true subPath: agent.yml + # Uncomment if using hints feature + #- name: external-inputs + # mountPath: /etc/elastic-agent/inputs.d - name: proc mountPath: /hostfs/proc readOnly: true @@ -96,6 +114,9 @@ spec: configMap: defaultMode: 0640 name: agent-node-datastreams + # Uncomment if using hints feature + #- name: external-inputs + # emptyDir: {} - name: proc hostPath: path: /proc From a0a3ed1aae5436b720b8065199afc629fe8930d6 Mon Sep 17 00:00:00 2001 From: Craig MacKenzie Date: Mon, 19 Sep 2022 09:37:18 -0400 Subject: [PATCH 130/180] Add Go 1.18 upgrade to breaking changes section. (#1216) * Add Go 1.18 upgrade to breaking changes section. * Fix the PR number in the changelog. --- CHANGELOG.next.asciidoc | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index b2fc7413192..920ddd16d84 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -18,6 +18,7 @@ - Remove deprecated/undocumented IncludeCreatorMetadata setting from kubernetes metadata config options {pull-beats}[28006] - The `/processes/` endpoint proxies to the subprocess's monitoring endpoint, instead of querying its `/stats` endpoint {pull-beats}[28165] - Remove username/password for fleet-server authentication. 
{pull-beats}[29458] +- Upgrade to Go 1.18. Certificates signed with SHA-1 are now rejected. See the Go 1.18 https://tip.golang.org/doc/go1.18#sha1[release notes] for details. {pull}832[832] ==== Bugfixes - Fix rename *ConfigChange to *PolicyChange to align on changes in the UI. {pull-beats}[20779] From 6b5e6d0236c1d56c5c9f4c4124fdb9ecd4bb4a44 Mon Sep 17 00:00:00 2001 From: Elastic Machine Date: Wed, 21 Sep 2022 13:17:36 -0400 Subject: [PATCH 131/180] [Release] add-backport-next (#1254) --- .mergify.yml | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/.mergify.yml b/.mergify.yml index 3fe46362854..528df9b498b 100644 --- a/.mergify.yml +++ b/.mergify.yml @@ -220,3 +220,16 @@ pull_request_rules: labels: - "backport" title: "[{{ destination_branch }}](backport #{{ number }}) {{ title }}" + - name: backport patches to 8.5 branch + conditions: + - merged + - label=backport-v8.5.0 + actions: + backport: + assignees: + - "{{ author }}" + branches: + - "8.5" + labels: + - "backport" + title: "[{{ destination_branch }}](backport #{{ number }}) {{ title }}" From 5989089c17e30a39f0b3e0e0a850e4c621e63a0f Mon Sep 17 00:00:00 2001 From: Craig MacKenzie Date: Wed, 21 Sep 2022 13:46:04 -0400 Subject: [PATCH 132/180] Bump version to 8.6.0. (#1259) --- version/version.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/version/version.go b/version/version.go index f5101a34efa..60029093c35 100644 --- a/version/version.go +++ b/version/version.go @@ -4,4 +4,4 @@ package version -const defaultBeatVersion = "8.5.0" +const defaultBeatVersion = "8.6.0" From 7d21718ba30e3d17fbc6c038cd12ecfd18b4ca1b Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Wed, 21 Sep 2022 14:18:26 -0400 Subject: [PATCH 133/180] [Automation] Update elastic stack version to 8.5.0-7dc445a0 for testing (#1248) Co-authored-by: apmmachine --- testing/environments/snapshot.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/testing/environments/snapshot.yml b/testing/environments/snapshot.yml index 3c8cb77b9bd..45cab6ea404 100644 --- a/testing/environments/snapshot.yml +++ b/testing/environments/snapshot.yml @@ -3,7 +3,7 @@ version: '2.3' services: elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:8.5.0-fcf3d4c2-SNAPSHOT + image: docker.elastic.co/elasticsearch/elasticsearch:8.5.0-7dc445a0-SNAPSHOT # When extend is used it merges healthcheck.tests, see: # https://github.com/docker/compose/issues/8962 # healthcheck: @@ -42,7 +42,7 @@ services: - ./docker/logstash/pki:/etc/pki:ro kibana: - image: docker.elastic.co/kibana/kibana:8.5.0-fcf3d4c2-SNAPSHOT + image: docker.elastic.co/kibana/kibana:8.5.0-7dc445a0-SNAPSHOT environment: - "ELASTICSEARCH_USERNAME=kibana_system_user" - "ELASTICSEARCH_PASSWORD=testing" From 815382d57da0a976c16c8c37ed759c7c2e76794b Mon Sep 17 00:00:00 2001 From: Michal Pristas Date: Thu, 22 Sep 2022 11:51:24 +0200 Subject: [PATCH 134/180] Fix: Endpoint collision between monitoring and regular beats (#1034) Fix: Endpoint collision between monitoring and regular beats (#1034) --- internal/pkg/agent/control/server/server.go | 25 +-- .../pkg/agent/operation/monitoring_test.go | 2 +- internal/pkg/agent/operation/operator.go | 4 +- .../core/monitoring/beats/beats_monitor.go | 41 ++--- .../pkg/core/monitoring/beats/monitoring.go | 20 ++- .../core/monitoring/beats/sidecar_monitor.go | 145 ++++++++++++++++++ internal/pkg/core/monitoring/monitor.go | 2 +- .../pkg/core/monitoring/noop/noop_monitor.go | 5 +- 
.../pkg/core/monitoring/server/process.go | 5 +- internal/pkg/core/plugin/process/start.go | 36 ++--- 10 files changed, 215 insertions(+), 70 deletions(-) create mode 100644 internal/pkg/core/monitoring/beats/sidecar_monitor.go diff --git a/internal/pkg/agent/control/server/server.go b/internal/pkg/agent/control/server/server.go index 366676540d9..0b89ccd8f71 100644 --- a/internal/pkg/agent/control/server/server.go +++ b/internal/pkg/agent/control/server/server.go @@ -26,7 +26,6 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/agent/control/proto" "github.com/elastic/elastic-agent/internal/pkg/agent/errors" "github.com/elastic/elastic-agent/internal/pkg/agent/program" - "github.com/elastic/elastic-agent/internal/pkg/core/monitoring/beats" monitoring "github.com/elastic/elastic-agent/internal/pkg/core/monitoring/beats" monitoringCfg "github.com/elastic/elastic-agent/internal/pkg/core/monitoring/config" "github.com/elastic/elastic-agent/internal/pkg/core/socket" @@ -37,6 +36,10 @@ import ( "github.com/elastic/elastic-agent/pkg/core/logger" ) +const ( + agentName = "elastic-agent" +) + // Server is the daemon side of the control protocol. type Server struct { logger *logger.Logger @@ -225,7 +228,8 @@ func (s *Server) ProcMeta(ctx context.Context, _ *proto.Empty) (*proto.ProcMetaR // gather spec data for all rk/apps running specs := s.getSpecInfo("", "") for _, si := range specs { - endpoint := monitoring.MonitoringEndpoint(si.spec, runtime.GOOS, si.rk) + isSidecar := strings.HasSuffix(si.app, "_monitoring") + endpoint := monitoring.MonitoringEndpoint(si.spec, runtime.GOOS, si.rk, isSidecar) client := newSocketRequester(si.app, si.rk, endpoint) procMeta := client.procMeta(ctx) @@ -258,9 +262,9 @@ func (s *Server) Pprof(ctx context.Context, req *proto.PprofRequest) (*proto.Ppr ch := make(chan *proto.PprofResult, 1) // retrieve elastic-agent pprof data if requested or application is unspecified. - if req.AppName == "" || req.AppName == "elastic-agent" { - endpoint := beats.AgentMonitoringEndpoint(runtime.GOOS, s.monitoringCfg.HTTP) - c := newSocketRequester("elastic-agent", "", endpoint) + if req.AppName == "" || req.AppName == agentName { + endpoint := monitoring.AgentMonitoringEndpoint(runtime.GOOS, s.monitoringCfg.HTTP) + c := newSocketRequester(agentName, "", endpoint) for _, opt := range req.PprofType { wg.Add(1) go func(opt proto.PprofOption) { @@ -273,11 +277,11 @@ func (s *Server) Pprof(ctx context.Context, req *proto.PprofRequest) (*proto.Ppr // get requested rk/appname spec or all specs var specs []specInfo - if req.AppName != "elastic-agent" { + if req.AppName != agentName { specs = s.getSpecInfo(req.RouteKey, req.AppName) } for _, si := range specs { - endpoint := monitoring.MonitoringEndpoint(si.spec, runtime.GOOS, si.rk) + endpoint := monitoring.MonitoringEndpoint(si.spec, runtime.GOOS, si.rk, false) c := newSocketRequester(si.app, si.rk, endpoint) // Launch a concurrent goroutine to gather all pprof endpoints from a socket. 
for _, opt := range req.PprofType { @@ -315,8 +319,8 @@ func (s *Server) ProcMetrics(ctx context.Context, _ *proto.Empty) (*proto.ProcMe } // gather metrics buffer data from the elastic-agent - endpoint := beats.AgentMonitoringEndpoint(runtime.GOOS, s.monitoringCfg.HTTP) - c := newSocketRequester("elastic-agent", "", endpoint) + endpoint := monitoring.AgentMonitoringEndpoint(runtime.GOOS, s.monitoringCfg.HTTP) + c := newSocketRequester(agentName, "", endpoint) metrics := c.procMetrics(ctx) resp := &proto.ProcMetricsResponse{ @@ -326,7 +330,8 @@ func (s *Server) ProcMetrics(ctx context.Context, _ *proto.Empty) (*proto.ProcMe // gather metrics buffer data from all other processes specs := s.getSpecInfo("", "") for _, si := range specs { - endpoint := monitoring.MonitoringEndpoint(si.spec, runtime.GOOS, si.rk) + isSidecar := strings.HasSuffix(si.app, "_monitoring") + endpoint := monitoring.MonitoringEndpoint(si.spec, runtime.GOOS, si.rk, isSidecar) client := newSocketRequester(si.app, si.rk, endpoint) s.logger.Infof("gather metrics from %s", endpoint) diff --git a/internal/pkg/agent/operation/monitoring_test.go b/internal/pkg/agent/operation/monitoring_test.go index cc365cae540..06a9cfbe23b 100644 --- a/internal/pkg/agent/operation/monitoring_test.go +++ b/internal/pkg/agent/operation/monitoring_test.go @@ -212,7 +212,7 @@ type testMonitor struct { // EnrichArgs enriches arguments provided to application, in order to enable // monitoring -func (b *testMonitor) EnrichArgs(_ program.Spec, _ string, args []string, _ bool) []string { +func (b *testMonitor) EnrichArgs(_ program.Spec, _ string, args []string) []string { return args } diff --git a/internal/pkg/agent/operation/operator.go b/internal/pkg/agent/operation/operator.go index ed28b7cb633..afe19bf702b 100644 --- a/internal/pkg/agent/operation/operator.go +++ b/internal/pkg/agent/operation/operator.go @@ -27,7 +27,7 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/config" "github.com/elastic/elastic-agent/internal/pkg/core/app" "github.com/elastic/elastic-agent/internal/pkg/core/monitoring" - "github.com/elastic/elastic-agent/internal/pkg/core/monitoring/noop" + "github.com/elastic/elastic-agent/internal/pkg/core/monitoring/beats" "github.com/elastic/elastic-agent/internal/pkg/core/plugin/process" "github.com/elastic/elastic-agent/internal/pkg/core/plugin/service" "github.com/elastic/elastic-agent/internal/pkg/core/state" @@ -387,7 +387,7 @@ func (o *Operator) getApp(p Descriptor) (Application, error) { appName := p.BinaryName() if app.IsSidecar(p) { // make watchers unmonitorable - monitor = noop.NewMonitor() + monitor = beats.NewSidecarMonitor(o.config.DownloadConfig, o.config.MonitoringConfig) appName += "_monitoring" } diff --git a/internal/pkg/core/monitoring/beats/beats_monitor.go b/internal/pkg/core/monitoring/beats/beats_monitor.go index a513729497b..3ea17ae1384 100644 --- a/internal/pkg/core/monitoring/beats/beats_monitor.go +++ b/internal/pkg/core/monitoring/beats/beats_monitor.go @@ -5,7 +5,6 @@ package beats import ( - "fmt" "net/url" "os" "path/filepath" @@ -20,8 +19,13 @@ import ( monitoringConfig "github.com/elastic/elastic-agent/internal/pkg/core/monitoring/config" ) -const httpPlusPrefix = "http+" -const defaultMonitoringNamespace = "default" +const ( + httpPlusPrefix = "http+" + defaultMonitoringNamespace = "default" + fileSchemePrefix = "file" + unixSchemePrefix = "unix" + windowsOS = "windows" +) // Monitor implements the monitoring.Monitor 
interface providing information // about beats. @@ -99,15 +103,11 @@ func (b *Monitor) WatchLogs() bool { return b.config.Enabled && b.config.Monitor func (b *Monitor) WatchMetrics() bool { return b.config.Enabled && b.config.MonitorMetrics } func (b *Monitor) generateMonitoringEndpoint(spec program.Spec, pipelineID string) string { - return MonitoringEndpoint(spec, b.operatingSystem, pipelineID) -} - -func (b *Monitor) generateLoggingFile(spec program.Spec, pipelineID string) string { - return getLoggingFile(spec, b.operatingSystem, b.installPath, pipelineID) + return MonitoringEndpoint(spec, b.operatingSystem, pipelineID, false) } func (b *Monitor) generateLoggingPath(spec program.Spec, pipelineID string) string { - return filepath.Dir(b.generateLoggingFile(spec, pipelineID)) + return filepath.Dir(getLoggingFile(spec, b.operatingSystem, pipelineID)) } func (b *Monitor) ownLoggingPath(spec program.Spec) bool { @@ -118,15 +118,10 @@ func (b *Monitor) ownLoggingPath(spec program.Spec) bool { // EnrichArgs enriches arguments provided to application, in order to enable // monitoring -func (b *Monitor) EnrichArgs(spec program.Spec, pipelineID string, args []string, isSidecar bool) []string { +func (b *Monitor) EnrichArgs(spec program.Spec, pipelineID string, args []string) []string { appendix := make([]string, 0, 7) - monitoringEndpoint := b.generateMonitoringEndpoint(spec, pipelineID) - if monitoringEndpoint != "" { - endpoint := monitoringEndpoint - if isSidecar { - endpoint += "_monitor" - } + if endpoint := b.generateMonitoringEndpoint(spec, pipelineID); endpoint != "" { appendix = append(appendix, "-E", "http.enabled=true", "-E", "http.host="+endpoint, @@ -146,10 +141,6 @@ func (b *Monitor) EnrichArgs(spec program.Spec, pipelineID string, args []string loggingPath := b.generateLoggingPath(spec, pipelineID) if loggingPath != "" { logFile := spec.Cmd - if isSidecar { - logFile += "_monitor" - } - logFile = fmt.Sprintf("%s", logFile) appendix = append(appendix, "-E", "logging.files.path="+loggingPath, "-E", "logging.files.name="+logFile, @@ -224,7 +215,7 @@ func (b *Monitor) LogPath(spec program.Spec, pipelineID string) string { return "" } - return b.generateLoggingFile(spec, pipelineID) + return getLoggingFile(spec, b.operatingSystem, pipelineID) } // MetricsPath describes a location where application exposes metrics @@ -272,15 +263,15 @@ func monitoringDrop(path string) (drop string) { } u, _ := url.Parse(path) - if u == nil || (u.Scheme != "" && u.Scheme != "file" && u.Scheme != "unix") { + if u == nil || (u.Scheme != "" && u.Scheme != fileSchemePrefix && u.Scheme != unixSchemePrefix) { return "" } - if u.Scheme == "file" { + if u.Scheme == fileSchemePrefix { return strings.TrimPrefix(path, "file://") } - if u.Scheme == "unix" { + if u.Scheme == unixSchemePrefix { return strings.TrimPrefix(path, "unix://") } @@ -299,7 +290,7 @@ func isWindowsPath(path string) bool { } func changeOwner(path string, uid, gid int) error { - if runtime.GOOS == "windows" { + if runtime.GOOS == windowsOS { // on windows it always returns the syscall.EWINDOWS error, wrapped in *PathError return nil } diff --git a/internal/pkg/core/monitoring/beats/monitoring.go b/internal/pkg/core/monitoring/beats/monitoring.go index a724e6f4246..94f3078ddee 100644 --- a/internal/pkg/core/monitoring/beats/monitoring.go +++ b/internal/pkg/core/monitoring/beats/monitoring.go @@ -27,19 +27,27 @@ const ( agentMbEndpointFileFormatWin = `npipe:///elastic-agent` // agentMbEndpointHTTP is used with cloud and exposes metrics on http 
endpoint agentMbEndpointHTTP = "http://%s:%d" + + monitorSuffix = "_monitor" ) // MonitoringEndpoint is an endpoint where process is exposing its metrics. -func MonitoringEndpoint(spec program.Spec, operatingSystem, pipelineID string) string { +func MonitoringEndpoint(spec program.Spec, operatingSystem, pipelineID string, isSidecar bool) (endpointPath string) { + defer func() { + if isSidecar && endpointPath != "" { + endpointPath += monitorSuffix + } + }() + if endpoint, ok := spec.MetricEndpoints[operatingSystem]; ok { return endpoint } - if operatingSystem == "windows" { + if operatingSystem == windowsOS { return fmt.Sprintf(mbEndpointFileFormatWin, pipelineID, spec.Cmd) } // unix socket path must be less than 104 characters path := fmt.Sprintf("unix://%s.sock", filepath.Join(paths.TempDir(), pipelineID, spec.Cmd, spec.Cmd)) - if len(path) < 104 { + if (isSidecar && len(path) < 104-len(monitorSuffix)) || (!isSidecar && len(path) < 104) { return path } // place in global /tmp (or /var/tmp on Darwin) to ensure that its small enough to fit; current path is way to long @@ -47,11 +55,11 @@ func MonitoringEndpoint(spec program.Spec, operatingSystem, pipelineID string) s return fmt.Sprintf(`unix:///tmp/elastic-agent/%x.sock`, sha256.Sum256([]byte(path))) } -func getLoggingFile(spec program.Spec, operatingSystem, installPath, pipelineID string) string { +func getLoggingFile(spec program.Spec, operatingSystem, pipelineID string) string { if path, ok := spec.LogPaths[operatingSystem]; ok { return path } - if operatingSystem == "windows" { + if operatingSystem == windowsOS { return fmt.Sprintf(logFileFormatWin, paths.Home(), pipelineID, spec.Cmd) } return fmt.Sprintf(logFileFormat, paths.Home(), pipelineID, spec.Cmd) @@ -63,7 +71,7 @@ func AgentMonitoringEndpoint(operatingSystem string, cfg *monitoringConfig.Monit return fmt.Sprintf(agentMbEndpointHTTP, cfg.Host, cfg.Port) } - if operatingSystem == "windows" { + if operatingSystem == windowsOS { return agentMbEndpointFileFormatWin } // unix socket path must be less than 104 characters diff --git a/internal/pkg/core/monitoring/beats/sidecar_monitor.go b/internal/pkg/core/monitoring/beats/sidecar_monitor.go new file mode 100644 index 00000000000..aa249bafa0f --- /dev/null +++ b/internal/pkg/core/monitoring/beats/sidecar_monitor.go @@ -0,0 +1,145 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package beats + +import ( + "fmt" + "os" + + "github.com/elastic/elastic-agent/internal/pkg/agent/configuration" + "github.com/elastic/elastic-agent/internal/pkg/agent/errors" + "github.com/elastic/elastic-agent/internal/pkg/agent/program" + "github.com/elastic/elastic-agent/internal/pkg/artifact" + "github.com/elastic/elastic-agent/internal/pkg/config" + monitoringConfig "github.com/elastic/elastic-agent/internal/pkg/core/monitoring/config" +) + +// SidecarMonitor provides information about the way how beat is monitored +type SidecarMonitor struct { + operatingSystem string + config *monitoringConfig.MonitoringConfig +} + +// NewSidecarMonitor creates a beats sidecar monitor, functionality is restricted purely on exposing +// http endpoint for diagnostics. 
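+// A nil monitoringCfg falls back to the default monitoring configuration,
+// with pprof and the HTTP metrics buffer explicitly disabled.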
+func NewSidecarMonitor(downloadConfig *artifact.Config, monitoringCfg *monitoringConfig.MonitoringConfig) *SidecarMonitor { + if monitoringCfg == nil { + monitoringCfg = monitoringConfig.DefaultConfig() + monitoringCfg.Pprof = &monitoringConfig.PprofConfig{Enabled: false} + monitoringCfg.HTTP.Buffer = &monitoringConfig.BufferConfig{Enabled: false} + } + + return &SidecarMonitor{ + operatingSystem: downloadConfig.OS(), + config: monitoringCfg, + } +} + +// Reload reloads state of the monitoring based on config. +func (b *SidecarMonitor) Reload(rawConfig *config.Config) error { + cfg := configuration.DefaultConfiguration() + if err := rawConfig.Unpack(&cfg); err != nil { + return err + } + + if cfg == nil || cfg.Settings == nil || cfg.Settings.MonitoringConfig == nil { + b.config = monitoringConfig.DefaultConfig() + } else { + if cfg.Settings.MonitoringConfig.Pprof == nil { + cfg.Settings.MonitoringConfig.Pprof = b.config.Pprof + } + if cfg.Settings.MonitoringConfig.HTTP.Buffer == nil { + cfg.Settings.MonitoringConfig.HTTP.Buffer = b.config.HTTP.Buffer + } + b.config = cfg.Settings.MonitoringConfig + } + + return nil +} + +// EnrichArgs enriches arguments provided to application, in order to enable +// monitoring +func (b *SidecarMonitor) EnrichArgs(spec program.Spec, pipelineID string, args []string) []string { + appendix := make([]string, 0, 7) + + if endpoint := MonitoringEndpoint(spec, b.operatingSystem, pipelineID, true); endpoint != "" { + appendix = append(appendix, + "-E", "http.enabled=true", + "-E", "http.host="+endpoint, + ) + if b.config.Pprof != nil && b.config.Pprof.Enabled { + appendix = append(appendix, + "-E", "http.pprof.enabled=true", + ) + } + if b.config.HTTP.Buffer != nil && b.config.HTTP.Buffer.Enabled { + appendix = append(appendix, + "-E", "http.buffer.enabled=true", + ) + } + } + + return append(args, appendix...) +} + +// Cleanup cleans up all drops. +func (b *SidecarMonitor) Cleanup(spec program.Spec, pipelineID string) error { + endpoint := MonitoringEndpoint(spec, b.operatingSystem, pipelineID, true) + drop := monitoringDrop(endpoint) + + return os.RemoveAll(drop) +} + +// Close disables monitoring +func (b *SidecarMonitor) Close() { + b.config.Enabled = false + b.config.MonitorMetrics = false + b.config.MonitorLogs = false +} + +// Prepare executes steps in order for monitoring to work correctly +func (b *SidecarMonitor) Prepare(spec program.Spec, pipelineID string, uid, gid int) error { + endpoint := MonitoringEndpoint(spec, b.operatingSystem, pipelineID, true) + drop := monitoringDrop(endpoint) + + if err := os.MkdirAll(drop, 0775); err != nil { + return errors.New(err, fmt.Sprintf("failed to create a directory %q", drop)) + } + + if err := changeOwner(drop, uid, gid); err != nil { + return errors.New(err, fmt.Sprintf("failed to change owner of a directory %q", drop)) + } + + return nil +} + +// LogPath describes a path where application stores logs. Empty if +// application is not monitorable +func (b *SidecarMonitor) LogPath(program.Spec, string) string { + return "" +} + +// MetricsPath describes a location where application exposes metrics +// collectable by metricbeat. +func (b *SidecarMonitor) MetricsPath(program.Spec, string) string { + return "" +} + +// MetricsPathPrefixed return metrics path prefixed with http+ prefix. +func (b *SidecarMonitor) MetricsPathPrefixed(program.Spec, string) string { + return "" +} + +// IsMonitoringEnabled returns true if monitoring is configured. 
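+// Sidecar monitors always report false here, so the monitoring sidecar is
+// never itself monitored.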
+func (b *SidecarMonitor) IsMonitoringEnabled() bool { return false } + +// WatchLogs return true if monitoring is configured and monitoring logs is enabled. +func (b *SidecarMonitor) WatchLogs() bool { return false } + +// WatchMetrics return true if monitoring is configured and monitoring metrics is enabled. +func (b *SidecarMonitor) WatchMetrics() bool { return false } + +// MonitoringNamespace returns monitoring namespace configured. +func (b *SidecarMonitor) MonitoringNamespace() string { return "default" } diff --git a/internal/pkg/core/monitoring/monitor.go b/internal/pkg/core/monitoring/monitor.go index 2c87e384976..2c6ff4bd5bc 100644 --- a/internal/pkg/core/monitoring/monitor.go +++ b/internal/pkg/core/monitoring/monitor.go @@ -19,7 +19,7 @@ type Monitor interface { MetricsPathPrefixed(spec program.Spec, pipelineID string) string Prepare(spec program.Spec, pipelineID string, uid, gid int) error - EnrichArgs(spec program.Spec, pipelineID string, args []string, isSidecar bool) []string + EnrichArgs(spec program.Spec, pipelineID string, args []string) []string Cleanup(spec program.Spec, pipelineID string) error Reload(cfg *config.Config) error IsMonitoringEnabled() bool diff --git a/internal/pkg/core/monitoring/noop/noop_monitor.go b/internal/pkg/core/monitoring/noop/noop_monitor.go index 44e47982455..d04eb08feec 100644 --- a/internal/pkg/core/monitoring/noop/noop_monitor.go +++ b/internal/pkg/core/monitoring/noop/noop_monitor.go @@ -11,8 +11,7 @@ import ( // Monitor is a monitoring interface providing information about the way // how beat is monitored -type Monitor struct { -} +type Monitor struct{} // NewMonitor creates a beats monitor. func NewMonitor() *Monitor { @@ -21,7 +20,7 @@ func NewMonitor() *Monitor { // EnrichArgs enriches arguments provided to application, in order to enable // monitoring -func (b *Monitor) EnrichArgs(_ program.Spec, _ string, args []string, _ bool) []string { +func (b *Monitor) EnrichArgs(_ program.Spec, _ string, args []string) []string { return args } diff --git a/internal/pkg/core/monitoring/server/process.go b/internal/pkg/core/monitoring/server/process.go index 56f7d26eb78..2c1fbf04bcf 100644 --- a/internal/pkg/core/monitoring/server/process.go +++ b/internal/pkg/core/monitoring/server/process.go @@ -150,15 +150,12 @@ func generateEndpoint(id string) (string, error) { return "", err } - endpoint := beats.MonitoringEndpoint(detail.spec, artifact.DefaultConfig().OS(), detail.output) + endpoint := beats.MonitoringEndpoint(detail.spec, artifact.DefaultConfig().OS(), detail.output, detail.isMonitoring) if !strings.HasPrefix(endpoint, httpPlusPrefix) && !strings.HasPrefix(endpoint, "http") { // add prefix for npipe and unix endpoint = httpPlusPrefix + endpoint } - if detail.isMonitoring { - endpoint += "_monitor" - } return endpoint, nil } diff --git a/internal/pkg/core/plugin/process/start.go b/internal/pkg/core/plugin/process/start.go index 29770ae714b..1f193a21690 100644 --- a/internal/pkg/core/plugin/process/start.go +++ b/internal/pkg/core/plugin/process/start.go @@ -23,6 +23,13 @@ import ( "github.com/elastic/elastic-agent/pkg/core/server" ) +const ( + levelInfo = "info" + levelDebug = "debug" + levelWarning = "warning" + levelError = "error" +) + // Start starts the application with a specified config. 
func (a *Application) Start(ctx context.Context, t app.Taggable, cfg map[string]interface{}) error { a.appLock.Lock() @@ -74,8 +81,8 @@ func (a *Application) start(ctx context.Context, t app.Taggable, cfg map[string] // Failed applications can be started again. if srvState != nil { a.setState(state.Starting, "Starting", nil) - srvState.SetStatus(proto.StateObserved_STARTING, a.state.Message, a.state.Payload) - srvState.UpdateConfig(srvState.Config()) + _ = srvState.SetStatus(proto.StateObserved_STARTING, a.state.Message, a.state.Payload) + _ = srvState.UpdateConfig(srvState.Config()) } else { a.srvState, err = a.srv.Register(a, string(cfgStr)) if err != nil { @@ -119,8 +126,7 @@ func (a *Application) start(ctx context.Context, t app.Taggable, cfg map[string] spec.Args = injectLogLevel(a.logLevel, spec.Args) // use separate file - isSidecar := app.IsSidecar(t) - spec.Args = a.monitor.EnrichArgs(a.desc.Spec(), a.pipelineID, spec.Args, isSidecar) + spec.Args = a.monitor.EnrichArgs(a.desc.Spec(), a.pipelineID, spec.Args) // specify beat name to avoid data lock conflicts // as for https://github.com/elastic/beats/v7/pull/14030 more than one instance @@ -165,24 +171,18 @@ func (a *Application) writeToStdin(as *server.ApplicationState, wc io.WriteClose } func injectLogLevel(logLevel string, args []string) []string { - var level string - // Translate to level beat understands - switch logLevel { - case "info": - level = "info" - case "debug": - level = "debug" - case "warning": - level = "warning" - case "error": - level = "error" + if args == nil || logLevel == "" { + return args } - if args == nil || level == "" { - return args + if logLevel == levelDebug || + logLevel == levelInfo || + logLevel == levelWarning || + logLevel == levelError { + return append(args, "-E", "logging.level="+logLevel) } - return append(args, "-E", "logging.level="+level) + return args } func injectDataPath(args []string, pipelineID, id string) []string { From 28fab94fa85debfee5dc8c7ac943b7b95433bdde Mon Sep 17 00:00:00 2001 From: Dan Kortschak <90160302+efd6@users.noreply.github.com> Date: Fri, 23 Sep 2022 20:53:13 +0930 Subject: [PATCH 135/180] internal/pkg/agent/cmd: don't format error message with nil errors (#1240) The failure conditions allow nil errors to result in an error being formatted, when formatting due to a non-accepted HTTP status code and a nil error, omit the error. Co-authored-by: Craig MacKenzie --- internal/pkg/agent/cmd/container.go | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/internal/pkg/agent/cmd/container.go b/internal/pkg/agent/cmd/container.go index 047d51a8fef..44fc531b8ca 100644 --- a/internal/pkg/agent/cmd/container.go +++ b/internal/pkg/agent/cmd/container.go @@ -653,8 +653,13 @@ func performGET(cfg setupConfig, client *kibana.Client, path string, response in for i := 0; i < cfg.Kibana.RetryMaxCount; i++ { code, result, err := client.Connection.Request("GET", path, nil, nil, nil) if err != nil || code != 200 { - err = fmt.Errorf("http GET request to %s%s fails: %w. Response: %s", - client.Connection.URL, path, err, truncateString(result)) + if err != nil { + err = fmt.Errorf("http GET request to %s%s fails: %w. Response: %s", + client.Connection.URL, path, err, truncateString(result)) + } else { + err = fmt.Errorf("http GET request to %s%s fails. 
StatusCode: %d Response: %s", + client.Connection.URL, path, code, truncateString(result)) + } fmt.Fprintf(writer, "%s failed: %s\n", msg, err) <-time.After(cfg.Kibana.RetrySleepDuration) continue @@ -672,8 +677,13 @@ func performPOST(cfg setupConfig, client *kibana.Client, path string, writer io. for i := 0; i < cfg.Kibana.RetryMaxCount; i++ { code, result, err := client.Connection.Request("POST", path, nil, nil, nil) if err != nil || code >= 400 { - err = fmt.Errorf("http POST request to %s%s fails: %w. Response: %s", - client.Connection.URL, path, err, truncateString(result)) + if err != nil { + err = fmt.Errorf("http POST request to %s%s fails: %w. Response: %s", + client.Connection.URL, path, err, truncateString(result)) + } else { + err = fmt.Errorf("http POST request to %s%s fails. StatusCode: %d Response: %s", + client.Connection.URL, path, code, truncateString(result)) + } lastErr = err fmt.Fprintf(writer, "%s failed: %s\n", msg, err) <-time.After(cfg.Kibana.RetrySleepDuration) From 7af8092b9dbdce0ed1139a290d4c0398ab0edab7 Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Mon, 26 Sep 2022 01:38:33 -0400 Subject: [PATCH 136/180] [Automation] Update elastic stack version to 8.6.0-21651da3 for testing (#1290) Co-authored-by: apmmachine --- testing/environments/snapshot.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/testing/environments/snapshot.yml b/testing/environments/snapshot.yml index 45cab6ea404..4e2277d2edf 100644 --- a/testing/environments/snapshot.yml +++ b/testing/environments/snapshot.yml @@ -3,7 +3,7 @@ version: '2.3' services: elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:8.5.0-7dc445a0-SNAPSHOT + image: docker.elastic.co/elasticsearch/elasticsearch:8.6.0-21651da3-SNAPSHOT # When extend is used it merges healthcheck.tests, see: # https://github.com/docker/compose/issues/8962 # healthcheck: @@ -42,7 +42,7 @@ services: - ./docker/logstash/pki:/etc/pki:ro kibana: - image: docker.elastic.co/kibana/kibana:8.5.0-7dc445a0-SNAPSHOT + image: docker.elastic.co/kibana/kibana:8.6.0-21651da3-SNAPSHOT environment: - "ELASTICSEARCH_USERNAME=kibana_system_user" - "ELASTICSEARCH_PASSWORD=testing" From 2b6cfdc0a764529633f55ea51cd3405232af3ebe Mon Sep 17 00:00:00 2001 From: Michal Pristas Date: Mon, 26 Sep 2022 09:29:53 +0200 Subject: [PATCH 137/180] Fixed: source uri reload for download/verify components (#1252) Fixed: source uri reload for download/verify components (#1252) --- CHANGELOG.next.asciidoc | 1 + internal/pkg/agent/operation/operator.go | 39 +++++++++++ internal/pkg/agent/operation/operator_test.go | 64 +++++++++++++++++-- internal/pkg/artifact/config.go | 8 +-- 4 files changed, 102 insertions(+), 10 deletions(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 920ddd16d84..8aa29f93e7f 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -117,6 +117,7 @@ - Fix a panic caused by a race condition when installing the Elastic Agent. {issues}806[806] - Use at least warning level for all status logs {pull}1218[1218] - Remove fleet event reporter and events from checkin body. 
{issue}993[993] +- Fix unintended reset of source URI when downloading components {pull}1252[1252] ==== New features diff --git a/internal/pkg/agent/operation/operator.go b/internal/pkg/agent/operation/operator.go index afe19bf702b..0705d21bb5d 100644 --- a/internal/pkg/agent/operation/operator.go +++ b/internal/pkg/agent/operation/operator.go @@ -141,6 +141,12 @@ func (o *Operator) Reload(rawConfig *config.Config) error { return errors.New(err, "failed to unpack artifact config") } + sourceURI, err := reloadSourceURI(o.logger, rawConfig) + if err != nil { + return errors.New(err, "failed to parse source URI") + } + tmp.C.SourceURI = sourceURI + if err := o.reloadComponent(o.downloader, "downloader", tmp.C); err != nil { return err } @@ -148,6 +154,39 @@ func (o *Operator) Reload(rawConfig *config.Config) error { return o.reloadComponent(o.verifier, "verifier", tmp.C) } +func reloadSourceURI(logger *logger.Logger, rawConfig *config.Config) (string, error) { + type reloadConfig struct { + // SourceURI: source of the artifacts, e.g https://artifacts.elastic.co/downloads/ + SourceURI string `json:"agent.download.sourceURI" config:"agent.download.sourceURI"` + + // FleetSourceURI: source of the artifacts, e.g https://artifacts.elastic.co/downloads/ coming from fleet which uses + // different naming. + FleetSourceURI string `json:"agent.download.source_uri" config:"agent.download.source_uri"` + } + cfg := &reloadConfig{} + if err := rawConfig.Unpack(&cfg); err != nil { + return "", errors.New(err, "failed to unpack config during reload") + } + + var newSourceURI string + if fleetURI := strings.TrimSpace(cfg.FleetSourceURI); fleetURI != "" { + // fleet configuration takes precedence + newSourceURI = fleetURI + } else if sourceURI := strings.TrimSpace(cfg.SourceURI); sourceURI != "" { + newSourceURI = sourceURI + } + + if newSourceURI != "" { + logger.Infof("Source URI in operator changed to %q", newSourceURI) + return newSourceURI, nil + } + + // source uri unset, reset to default + logger.Infof("Source URI in reset %q", artifact.DefaultSourceURI) + return artifact.DefaultSourceURI, nil + +} + func (o *Operator) reloadComponent(component interface{}, name string, cfg *artifact.Config) error { r, ok := component.(artifact.ConfigReloader) if !ok { diff --git a/internal/pkg/agent/operation/operator_test.go b/internal/pkg/agent/operation/operator_test.go index 731f04eea8b..5c0cf112ed5 100644 --- a/internal/pkg/agent/operation/operator_test.go +++ b/internal/pkg/agent/operation/operator_test.go @@ -15,10 +15,13 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "github.com/elastic/elastic-agent-client/v7/pkg/proto" "github.com/elastic/elastic-agent/internal/pkg/agent/program" + "github.com/elastic/elastic-agent/internal/pkg/artifact" + "github.com/elastic/elastic-agent/internal/pkg/config" "github.com/elastic/elastic-agent/internal/pkg/core/state" ) @@ -71,7 +74,7 @@ func TestConfigurableRun(t *testing.T) { if err := operator.start(p, nil); err != nil { t.Fatal(err) } - defer operator.stop(p) // failure catch, to ensure no sub-process stays running + defer func() { _ = operator.stop(p) }() // failure catch, to ensure no sub-process stays running waitFor(t, func() error { items := operator.State() @@ -87,6 +90,7 @@ func TestConfigurableRun(t *testing.T) { // try to configure cfg := make(map[string]interface{}) + //nolint:gosec // rand is ok for test tstFilePath := filepath.Join(os.TempDir(), 
fmt.Sprintf("tmp%d", rand.Uint32())) cfg["TestFile"] = tstFilePath if err := operator.pushConfig(p, cfg); err != nil { @@ -145,7 +149,7 @@ func TestConfigurableFailed(t *testing.T) { if err := operator.start(p, nil); err != nil { t.Fatal(err) } - defer operator.stop(p) // failure catch, to ensure no sub-process stays running + defer func() { _ = operator.stop(p) }() // failure catch, to ensure no sub-process stays running var pid int waitFor(t, func() error { @@ -172,6 +176,7 @@ func TestConfigurableFailed(t *testing.T) { // try to configure (with failed status) cfg := make(map[string]interface{}) + //nolint:gosec // rand is ok for test tstFilePath := filepath.Join(os.TempDir(), fmt.Sprintf("tmp%d", rand.Uint32())) cfg["TestFile"] = tstFilePath cfg["Status"] = proto.StateObserved_FAILED @@ -254,7 +259,7 @@ func TestConfigurableCrash(t *testing.T) { if err := operator.start(p, nil); err != nil { t.Fatal(err) } - defer operator.stop(p) // failure catch, to ensure no sub-process stays running + defer func() { _ = operator.stop(p) }() // failure catch, to ensure no sub-process stays running var pid int waitFor(t, func() error { @@ -272,6 +277,7 @@ func TestConfigurableCrash(t *testing.T) { // try to configure (with failed status) cfg := make(map[string]interface{}) + //nolint:gosec // rand is ok for test tstFilePath := filepath.Join(os.TempDir(), fmt.Sprintf("tmp%d", rand.Uint32())) cfg["TestFile"] = tstFilePath cfg["Crash"] = true @@ -352,7 +358,7 @@ func TestConfigurableStartStop(t *testing.T) { p := getProgram("configurable", "1.0") operator := getTestOperator(t, downloadPath, installPath, p) - defer operator.stop(p) // failure catch, to ensure no sub-process stays running + defer func() { _ = operator.stop(p) }() // failure catch, to ensure no sub-process stays running // start and stop it 3 times for i := 0; i < 3; i++ { @@ -396,11 +402,11 @@ func TestConfigurableService(t *testing.T) { if err := operator.start(p, nil); err != nil { t.Fatal(err) } - defer operator.stop(p) // failure catch, to ensure no sub-process stays running + defer func() { _ = operator.stop(p) }() // failure catch, to ensure no sub-process stays running // emulating a service, so we need to start the binary here in the test spec := p.ProcessSpec() - cmd := exec.Command(spec.BinaryPath, fmt.Sprintf("%d", p.ServicePort())) + cmd := exec.Command(spec.BinaryPath, fmt.Sprintf("%d", p.ServicePort())) //nolint:gosec,G204 // this is fine cmd.Env = append(cmd.Env, os.Environ()...) 
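	// Run the emulated service from the directory that contains its binary.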
cmd.Dir = filepath.Dir(spec.BinaryPath) cmd.Stdout = os.Stdout @@ -423,6 +429,7 @@ func TestConfigurableService(t *testing.T) { // try to configure cfg := make(map[string]interface{}) + //nolint:gosec // rand is ok for test tstFilePath := filepath.Join(os.TempDir(), fmt.Sprintf("tmp%d", rand.Uint32())) cfg["TestFile"] = tstFilePath if err := operator.pushConfig(p, cfg); err != nil { @@ -462,6 +469,51 @@ func TestConfigurableService(t *testing.T) { } } +func TestReloadSourceURI(t *testing.T) { + testCases := map[string]struct { + IncomingConfig map[string]interface{} + ExpectedSourceURI string + }{ + "no-config": { + IncomingConfig: map[string]interface{}{}, + ExpectedSourceURI: artifact.DefaultSourceURI, + }, + "source-uri-provided": { + IncomingConfig: map[string]interface{}{ + "agent.download.sourceURI": "http://source-uri", + }, + ExpectedSourceURI: "http://source-uri", + }, + "fleet-source-uri-provided": { + IncomingConfig: map[string]interface{}{ + "agent.download.source_uri": "http://fleet-source-uri", + }, + ExpectedSourceURI: "http://fleet-source-uri", + }, + "both-source-uri-provided": { + IncomingConfig: map[string]interface{}{ + "agent.download.sourceURI": "http://source-uri", + "agent.download.source_uri": "http://fleet-source-uri", + }, + ExpectedSourceURI: "http://fleet-source-uri", + }, + } + + l := getLogger() + for testName, tc := range testCases { + t.Run(testName, func(t *testing.T) { + cfg, err := config.NewConfigFrom(tc.IncomingConfig) + require.NoError(t, err) + require.NotNil(t, cfg) + + sourceUri, err := reloadSourceURI(l, cfg) + require.NoError(t, err) + require.Equal(t, tc.ExpectedSourceURI, sourceUri) + + }) + } +} + func isAvailable(name, version string) error { p := getProgram(name, version) spec := p.ProcessSpec() diff --git a/internal/pkg/artifact/config.go b/internal/pkg/artifact/config.go index 76637c28d31..65c021ff9b3 100644 --- a/internal/pkg/artifact/config.go +++ b/internal/pkg/artifact/config.go @@ -22,7 +22,7 @@ const ( linux = "linux" windows = "windows" - defaultSourceURI = "https://artifacts.elastic.co/downloads/" + DefaultSourceURI = "https://artifacts.elastic.co/downloads/" ) type ConfigReloader interface { @@ -139,8 +139,8 @@ func (r *Reloader) reloadSourceURI(rawConfig *config.Config) error { r.cfg.SourceURI = newSourceURI } else { // source uri unset, reset to default - r.log.Infof("Source URI reset from %q to %q", r.cfg.SourceURI, defaultSourceURI) - r.cfg.SourceURI = defaultSourceURI + r.log.Infof("Source URI reset from %q to %q", r.cfg.SourceURI, DefaultSourceURI) + r.cfg.SourceURI = DefaultSourceURI } return nil @@ -156,7 +156,7 @@ func DefaultConfig() *Config { transport.Timeout = 10 * time.Minute return &Config{ - SourceURI: defaultSourceURI, + SourceURI: DefaultSourceURI, TargetDirectory: paths.Downloads(), InstallPath: paths.Install(), HTTPTransportSettings: transport, From 717708a72c0c81aded34e93e61b77c9b0cf4e16a Mon Sep 17 00:00:00 2001 From: Michel Laterman <82832767+michel-laterman@users.noreply.github.com> Date: Mon, 26 Sep 2022 10:22:48 -0700 Subject: [PATCH 138/180] Expand status reporter/controller interfaces to allow local reporters (#1285) * Expand status reporter/controller interfaces to allow local reporters Add a local reporter map to the status controller. These reporters are not used when updating status with fleet-server, they are only used to gather local state information - specifically if the agent is degraded because checkin with fleet-server has failed. 
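In outline, the rule the new LocalStatus getter applies is simply that the
more severe status wins. A minimal sketch of that rule, with the struct and
the ordering of status codes simplified (this paraphrases the change below;
it is not the verbatim implementation):

    // Sketch only: assumes status codes are ordered by severity,
    // i.e. Healthy < Degraded < Failed, which the comparison relies on.
    func effectiveStatus(agent, local AgentStatus) AgentStatus {
        if local.Status > agent.Status {
            return local
        }
        return agent
    }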
This bypasses the bug that was introduced with the liveness endpoint where the agent could checkin (to fleet-server) with a degraded status because a previous checkin failed. Local reporters are used to generate a separate status. This status is used in the liveness endpoint. * fix linter --- CHANGELOG.next.asciidoc | 1 + .../gateway/fleet/fleet_gateway.go | 9 +- .../gateway/fleet/fleet_gateway_test.go | 8 ++ .../fleet/noop_status_controller_test.go | 12 ++- internal/pkg/core/status/handler.go | 3 +- internal/pkg/core/status/reporter.go | 84 +++++++++++++++++-- internal/pkg/testutils/status_reporter.go | 10 +++ 7 files changed, 110 insertions(+), 17 deletions(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 8aa29f93e7f..35d7c2b95a5 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -118,6 +118,7 @@ - Use at least warning level for all status logs {pull}1218[1218] - Remove fleet event reporter and events from checkin body. {issue}993[993] - Fix unintended reset of source URI when downloading components {pull}1252[1252] +- Create separate status reporter for local only events so that degraded fleet-checkins no longer affect health on successful fleet-checkins. {issue}1157[1157] {pull}1285[1285] ==== New features diff --git a/internal/pkg/agent/application/gateway/fleet/fleet_gateway.go b/internal/pkg/agent/application/gateway/fleet/fleet_gateway.go index f6ff9b504f5..6df9f171fbe 100644 --- a/internal/pkg/agent/application/gateway/fleet/fleet_gateway.go +++ b/internal/pkg/agent/application/gateway/fleet/fleet_gateway.go @@ -88,6 +88,7 @@ type fleetGateway struct { checkinFailCounter int statusController status.Controller statusReporter status.Reporter + localReporter status.Reporter stateStore stateStore queue actionQueue } @@ -156,6 +157,7 @@ func newFleetGatewayWithScheduler( done: done, acker: acker, statusReporter: statusController.RegisterComponent("gateway"), + localReporter: statusController.RegisterLocalComponent("gateway-checkin"), statusController: statusController, stateStore: stateStore, queue: queue, @@ -208,6 +210,7 @@ func (f *fleetGateway) worker() { f.statusReporter.Update(state.Failed, errMsg, nil) } else { f.statusReporter.Update(state.Healthy, "", nil) + f.localReporter.Update(state.Healthy, "", nil) // we don't need to specifically set the local reporter to failed above, but it needs to be reset to healthy if a checking succeeds } case <-f.bgContext.Done(): @@ -291,12 +294,11 @@ func (f *fleetGateway) doExecute() (*fleetapi.CheckinResponse, error) { ) f.log.Error(err) + f.localReporter.Update(state.Failed, err.Error(), nil) return nil, err } if f.checkinFailCounter > 1 { - // do not update status reporter with failure - // status reporter would report connection failure on first successful connection, leading to - // stale result for certain period causing slight confusion. 
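+			// Degrade only the local reporter; local reporters are excluded
+			// from the status reported back to fleet-server.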
+ f.localReporter.Update(state.Degraded, fmt.Sprintf("checkin failed: %v", err), nil) f.log.Errorf("checking number %d failed: %s", f.checkinFailCounter, err.Error()) } continue @@ -386,6 +388,7 @@ func (f *fleetGateway) stop() { f.log.Info("Fleet gateway is stopping") defer f.scheduler.Stop() f.statusReporter.Unregister() + f.localReporter.Unregister() close(f.done) f.wg.Wait() } diff --git a/internal/pkg/agent/application/gateway/fleet/fleet_gateway_test.go b/internal/pkg/agent/application/gateway/fleet/fleet_gateway_test.go index 99cb0630385..2d691185c1c 100644 --- a/internal/pkg/agent/application/gateway/fleet/fleet_gateway_test.go +++ b/internal/pkg/agent/application/gateway/fleet/fleet_gateway_test.go @@ -25,6 +25,7 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/agent/application/paths" "github.com/elastic/elastic-agent/internal/pkg/agent/storage" "github.com/elastic/elastic-agent/internal/pkg/agent/storage/store" + "github.com/elastic/elastic-agent/internal/pkg/core/state" "github.com/elastic/elastic-agent/internal/pkg/fleetapi" noopacker "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker/noop" "github.com/elastic/elastic-agent/internal/pkg/scheduler" @@ -703,12 +704,18 @@ func TestRetriesOnFailures(t *testing.T) { queue.On("DequeueActions").Return([]fleetapi.Action{}) queue.On("Actions").Return([]fleetapi.Action{}) + localReporter := &testutils.MockReporter{} + localReporter.On("Update", state.Degraded, mock.Anything, mock.Anything).Times(2) + localReporter.On("Update", mock.Anything, mock.Anything, mock.Anything).Maybe() + localReporter.On("Unregister").Maybe() + fleetReporter := &testutils.MockReporter{} fleetReporter.On("Update", mock.Anything, mock.Anything, mock.Anything).Maybe() fleetReporter.On("Unregister").Maybe() statusController := &testutils.MockController{} statusController.On("RegisterComponent", "gateway").Return(fleetReporter).Once() + statusController.On("RegisterLocalComponent", "gateway-checkin").Return(localReporter).Once() statusController.On("StatusString").Return("string") gateway, err := newFleetGatewayWithScheduler( @@ -767,6 +774,7 @@ func TestRetriesOnFailures(t *testing.T) { waitFn() statusController.AssertExpectations(t) fleetReporter.AssertExpectations(t) + localReporter.AssertExpectations(t) }) t.Run("The retry loop is interruptible", diff --git a/internal/pkg/agent/application/gateway/fleet/noop_status_controller_test.go b/internal/pkg/agent/application/gateway/fleet/noop_status_controller_test.go index bbae6958ab6..18c84f5fc5e 100644 --- a/internal/pkg/agent/application/gateway/fleet/noop_status_controller_test.go +++ b/internal/pkg/agent/application/gateway/fleet/noop_status_controller_test.go @@ -13,13 +13,17 @@ import ( type noopController struct{} -func (*noopController) SetAgentID(_ string) {} -func (*noopController) RegisterComponent(_ string) status.Reporter { return &noopReporter{} } +func (*noopController) SetAgentID(_ string) {} +func (*noopController) RegisterComponent(_ string) status.Reporter { return &noopReporter{} } +func (*noopController) RegisterLocalComponent(_ string) status.Reporter { return &noopReporter{} } func (*noopController) RegisterComponentWithPersistance(_ string, _ bool) status.Reporter { return &noopReporter{} } -func (*noopController) RegisterApp(_ string, _ string) status.Reporter { return &noopReporter{} } -func (*noopController) Status() status.AgentStatus { return status.AgentStatus{Status: status.Healthy} } +func (*noopController) 
RegisterApp(_ string, _ string) status.Reporter { return &noopReporter{} } +func (*noopController) Status() status.AgentStatus { return status.AgentStatus{Status: status.Healthy} } +func (*noopController) LocalStatus() status.AgentStatus { + return status.AgentStatus{Status: status.Healthy} +} func (*noopController) StatusCode() status.AgentStatusCode { return status.Healthy } func (*noopController) UpdateStateID(_ string) {} func (*noopController) StatusString() string { return "online" } diff --git a/internal/pkg/core/status/handler.go b/internal/pkg/core/status/handler.go index e82f73fb216..1fa72a10f93 100644 --- a/internal/pkg/core/status/handler.go +++ b/internal/pkg/core/status/handler.go @@ -19,10 +19,11 @@ type LivenessResponse struct { } // ServeHTTP is an HTTP Handler for the status controller. +// It uses the local agent status so it is able to report a degraded state if the fleet-server checkin has issues. // Respose code is 200 for a healthy agent, and 503 otherwise. // Response body is a JSON object that contains the agent ID, status, message, and the last status update time. func (r *controller) ServeHTTP(wr http.ResponseWriter, req *http.Request) { - s := r.Status() + s := r.LocalStatus() lr := LivenessResponse{ ID: r.agentID, Status: s.Status.String(), diff --git a/internal/pkg/core/status/reporter.go b/internal/pkg/core/status/reporter.go index 848a69326e6..50f34651fa1 100644 --- a/internal/pkg/core/status/reporter.go +++ b/internal/pkg/core/status/reporter.go @@ -58,9 +58,11 @@ type AgentStatus struct { type Controller interface { SetAgentID(string) RegisterComponent(string) Reporter + RegisterLocalComponent(string) Reporter RegisterComponentWithPersistance(string, bool) Reporter RegisterApp(id string, name string) Reporter Status() AgentStatus + LocalStatus() AgentStatus StatusCode() AgentStatusCode StatusString() string UpdateStateID(string) @@ -68,15 +70,19 @@ type Controller interface { } type controller struct { - updateTime time.Time - log *logger.Logger - reporters map[string]*reporter - appReporters map[string]*reporter - stateID string - message string - agentID string - status AgentStatusCode - mx sync.Mutex + updateTime time.Time + log *logger.Logger + reporters map[string]*reporter + localReporters map[string]*reporter + appReporters map[string]*reporter + stateID string + message string + agentID string + status AgentStatusCode + localStatus AgentStatusCode + localMessage string + localTime time.Time + mx sync.Mutex } // NewController creates a new reporter. @@ -126,6 +132,28 @@ func (r *controller) UpdateStateID(stateID string) { r.updateStatus() } +// RegisterLocalComponent registers new component for local-only status updates. +func (r *controller) RegisterLocalComponent(componentIdentifier string) Reporter { + id := componentIdentifier + "-" + uuid.New().String()[:8] + rep := &reporter{ + name: componentIdentifier, + isRegistered: true, + unregisterFunc: func() { + r.mx.Lock() + delete(r.localReporters, id) + r.mx.Unlock() + }, + notifyChangeFunc: r.updateStatus, + isPersistent: false, + } + + r.mx.Lock() + r.localReporters[id] = rep + r.mx.Unlock() + + return rep +} + // Register registers new component for status updates. func (r *controller) RegisterComponent(componentIdentifier string) Reporter { return r.RegisterComponentWithPersistance(componentIdentifier, false) @@ -199,6 +227,25 @@ func (r *controller) Status() AgentStatus { } } +// LocalStatus returns the status from the local registered components if they are different from the agent status. 
+// If the agent status is more severe then the local status (failed vs degraded for example) agent status is used. +// If they are equal (healthy and healthy) agent status is used. +func (r *controller) LocalStatus() AgentStatus { + status := r.Status() + r.mx.Lock() + defer r.mx.Unlock() + + if r.localStatus > status.Status { + return AgentStatus{ + Status: r.localStatus, + Message: r.localMessage, + UpdateTime: r.localTime, + } + } + return status + +} + // StatusCode retrieves current agent status code. func (r *controller) StatusCode() AgentStatusCode { r.mx.Lock() @@ -208,9 +255,23 @@ func (r *controller) StatusCode() AgentStatusCode { func (r *controller) updateStatus() { status := Healthy + lStatus := Healthy message := "" + lMessage := "" r.mx.Lock() + for id, rep := range r.localReporters { + s := statusToAgentStatus(rep.status) + if s > lStatus { + lStatus = s + lMessage = fmt.Sprintf("component %s: %s", id, rep.message) + } + r.log.Debugf("local component '%s' has status '%s'", id, s) + if status == Failed { + break + } + } + for id, rep := range r.reporters { s := statusToAgentStatus(rep.status) if s > status { @@ -244,6 +305,11 @@ func (r *controller) updateStatus() { r.message = message r.updateTime = time.Now().UTC() } + if r.localStatus != lStatus { + r.localStatus = lStatus + r.localMessage = lMessage + r.localTime = time.Now().UTC() + } r.mx.Unlock() diff --git a/internal/pkg/testutils/status_reporter.go b/internal/pkg/testutils/status_reporter.go index 45448aa53b2..1d4fded4c0a 100644 --- a/internal/pkg/testutils/status_reporter.go +++ b/internal/pkg/testutils/status_reporter.go @@ -25,6 +25,11 @@ func (m *MockController) RegisterComponent(id string) status.Reporter { return args.Get(0).(status.Reporter) } +func (m *MockController) RegisterLocalComponent(id string) status.Reporter { + args := m.Called(id) + return args.Get(0).(status.Reporter) +} + func (m *MockController) RegisterComponentWithPersistance(id string, b bool) status.Reporter { args := m.Called(id, b) return args.Get(0).(status.Reporter) @@ -40,6 +45,11 @@ func (m *MockController) Status() status.AgentStatus { return args.Get(0).(status.AgentStatus) } +func (m *MockController) LocalStatus() status.AgentStatus { + args := m.Called() + return args.Get(0).(status.AgentStatus) +} + func (m *MockController) StatusCode() status.AgentStatusCode { args := m.Called() return args.Get(0).(status.AgentStatusCode) From 5225e5408683319713c2b6603d6f55b8b359ed0a Mon Sep 17 00:00:00 2001 From: Craig MacKenzie Date: Mon, 26 Sep 2022 18:18:51 -0400 Subject: [PATCH 139/180] Improve logging for agent upgrades. 
(#1287) --- CHANGELOG.next.asciidoc | 1 + .../handlers/handler_action_upgrade.go | 8 ++++ .../pkg/agent/application/upgrade/cleanup.go | 10 ++-- .../agent/application/upgrade/cleanup_test.go | 16 ++++++- .../pkg/agent/application/upgrade/rollback.go | 20 +++++--- .../application/upgrade/step_download.go | 4 ++ .../agent/application/upgrade/step_mark.go | 12 +++-- .../agent/application/upgrade/step_relink.go | 4 +- .../agent/application/upgrade/step_unpack.go | 16 +++++-- .../pkg/agent/application/upgrade/upgrade.go | 48 +++++++++++-------- internal/pkg/agent/cmd/watch.go | 25 +++++----- internal/pkg/agent/control/server/server.go | 2 + 12 files changed, 115 insertions(+), 51 deletions(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 35d7c2b95a5..faef2861ba9 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -206,3 +206,4 @@ - Fix incorrectly creating a filebeat redis input when a policy contains a packetbeat redis input. {issue}[427] {pull}[700] - Add `lumberjack` input type to the Filebeat spec. {pull}[959] - Add support for hints' based autodiscovery in kubernetes provider. {pull}[698] +- Improve logging during upgrades. {pull}[1287] diff --git a/internal/pkg/agent/application/pipeline/actions/handlers/handler_action_upgrade.go b/internal/pkg/agent/application/pipeline/actions/handlers/handler_action_upgrade.go index cfc7ea83749..a0d78a91622 100644 --- a/internal/pkg/agent/application/pipeline/actions/handlers/handler_action_upgrade.go +++ b/internal/pkg/agent/application/pipeline/actions/handlers/handler_action_upgrade.go @@ -39,6 +39,14 @@ func (h *Upgrade) Handle(ctx context.Context, a fleetapi.Action, acker store.Fle } _, err := h.upgrader.Upgrade(ctx, &upgradeAction{action}, true) + if err != nil { + // Always log upgrade failures at the error level. Action errors are logged at debug level + // by default higher up the stack in ActionDispatcher.Dispatch() + h.log.Errorw("Upgrade action failed", "error.message", err, + "action.version", action.Version, "action.source_uri", action.SourceURI, "action.id", action.ActionID, + "action.start_time", action.StartTime, "action.expiration", action.ActionExpiration) + } + return err } diff --git a/internal/pkg/agent/application/upgrade/cleanup.go b/internal/pkg/agent/application/upgrade/cleanup.go index 5e0618dfe78..2581e30a1d9 100644 --- a/internal/pkg/agent/application/upgrade/cleanup.go +++ b/internal/pkg/agent/application/upgrade/cleanup.go @@ -13,11 +13,15 @@ import ( "github.com/hashicorp/go-multierror" "github.com/elastic/elastic-agent/internal/pkg/agent/application/paths" + "github.com/elastic/elastic-agent/pkg/core/logger" ) -// preUpgradeCleanup will remove files that do not have the passed version number from the downloads directory. -func preUpgradeCleanup(version string) error { - files, err := os.ReadDir(paths.Downloads()) +// cleanNonMatchingVersionsFromDownloads will remove files that do not have the passed version number from the downloads directory. 
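// [Illustration, not part of this patch] The *w logging calls added throughout
// this commit (Debugw, Infow, Errorw) follow the zap SugaredLogger convention,
// a message followed by alternating key/value pairs:
//
//	log.Debugw("Cleaning up non-matching downloaded versions",
//		"version", version, "downloads.path", downloadsPath)
//
// which emits structured fields rather than a preformatted string. That the
// agent logger is zap-backed is an assumption based on this API shape.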
+func cleanNonMatchingVersionsFromDownloads(log *logger.Logger, version string) error { + downloadsPath := paths.Downloads() + log.Debugw("Cleaning up non-matching downloaded versions", "version", version, "downloads.path", downloadsPath) + + files, err := os.ReadDir(downloadsPath) if err != nil { return fmt.Errorf("unable to read directory %q: %w", paths.Downloads(), err) } diff --git a/internal/pkg/agent/application/upgrade/cleanup_test.go b/internal/pkg/agent/application/upgrade/cleanup_test.go index 736a9c42b3d..1170c26946d 100644 --- a/internal/pkg/agent/application/upgrade/cleanup_test.go +++ b/internal/pkg/agent/application/upgrade/cleanup_test.go @@ -9,7 +9,9 @@ import ( "path/filepath" "testing" + "github.com/elastic/elastic-agent-libs/logp" "github.com/elastic/elastic-agent/internal/pkg/agent/application/paths" + "github.com/elastic/elastic-agent/pkg/core/logger" "github.com/stretchr/testify/require" ) @@ -31,7 +33,8 @@ func setupDir(t *testing.T) { func TestPreUpgradeCleanup(t *testing.T) { setupDir(t) - err := preUpgradeCleanup("8.4.0") + log := newErrorLogger(t) + err := cleanNonMatchingVersionsFromDownloads(log, "8.4.0") require.NoError(t, err) files, err := os.ReadDir(paths.Downloads()) @@ -42,3 +45,14 @@ func TestPreUpgradeCleanup(t *testing.T) { require.NoError(t, err) require.Equal(t, []byte("hello, world!"), p) } + +func newErrorLogger(t *testing.T) *logger.Logger { + t.Helper() + + loggerCfg := logger.DefaultLoggingConfig() + loggerCfg.Level = logp.ErrorLevel + + log, err := logger.NewFromConfig("", loggerCfg, false) + require.NoError(t, err) + return log +} diff --git a/internal/pkg/agent/application/upgrade/rollback.go b/internal/pkg/agent/application/upgrade/rollback.go index 8ce6958beae..b4f6014fb3d 100644 --- a/internal/pkg/agent/application/upgrade/rollback.go +++ b/internal/pkg/agent/application/upgrade/rollback.go @@ -31,33 +31,35 @@ const ( ) // Rollback rollbacks to previous version which was functioning before upgrade. -func Rollback(ctx context.Context, prevHash, currentHash string) error { +func Rollback(ctx context.Context, log *logger.Logger, prevHash string, currentHash string) error { // change symlink - if err := ChangeSymlink(ctx, prevHash); err != nil { + if err := ChangeSymlink(ctx, log, prevHash); err != nil { return err } // revert active commit - if err := UpdateActiveCommit(prevHash); err != nil { + if err := UpdateActiveCommit(log, prevHash); err != nil { return err } // Restart + log.Info("Restarting the agent after rollback") if err := restartAgent(ctx); err != nil { return err } // cleanup everything except version we're rolling back into - return Cleanup(prevHash, true) + return Cleanup(log, prevHash, true) } // Cleanup removes all artifacts and files related to a specified version. 
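// [Illustration, not part of this patch] Cleanup below begins with
// <-time.After(afterRestartDelay). A plausible reading, assumed here rather
// than taken from the repository, is that the delay gives the restarted agent
// time to re-exec out of the old hash directory before that directory and the
// upgrade marker are removed underneath it.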
-func Cleanup(currentHash string, removeMarker bool) error { +func Cleanup(log *logger.Logger, currentHash string, removeMarker bool) error { + log.Debugw("Cleaning up upgrade", "hash", currentHash, "remove_marker", removeMarker) <-time.After(afterRestartDelay) // remove upgrade marker if removeMarker { - if err := CleanMarker(); err != nil { + if err := CleanMarker(log); err != nil { return err } } @@ -74,7 +76,9 @@ func Cleanup(currentHash string, removeMarker bool) error { } // remove symlink to avoid upgrade failures, ignore error - _ = os.Remove(prevSymlinkPath()) + prevSymlink := prevSymlinkPath() + log.Debugw("Removing previous symlink path", "file.path", prevSymlinkPath()) + _ = os.Remove(prevSymlink) dirPrefix := fmt.Sprintf("%s-", agentName) currentDir := fmt.Sprintf("%s-%s", agentName, currentHash) @@ -88,6 +92,7 @@ func Cleanup(currentHash string, removeMarker bool) error { } hashedDir := filepath.Join(paths.Data(), dir) + log.Debugw("Removing hashed data directory", "file.path", hashedDir) if cleanupErr := install.RemovePath(hashedDir); cleanupErr != nil { err = multierror.Append(err, cleanupErr) } @@ -113,6 +118,7 @@ func InvokeWatcher(log *logger.Logger) error { } }() + log.Debugw("Starting upgrade watcher", "path", cmd.Path, "args", cmd.Args, "env", cmd.Env, "dir", cmd.Dir) return cmd.Start() } diff --git a/internal/pkg/agent/application/upgrade/step_download.go b/internal/pkg/agent/application/upgrade/step_download.go index 27e4b9c9e9c..3190303b84e 100644 --- a/internal/pkg/agent/application/upgrade/step_download.go +++ b/internal/pkg/agent/application/upgrade/step_download.go @@ -40,6 +40,10 @@ func (u *Upgrader) downloadArtifact(ctx context.Context, version, sourceURI stri } } + u.log.Debugw("Downloading upgrade artifact", "version", version, + "source_uri", settings.SourceURI, "drop_path", settings.DropPath, + "target_path", settings.TargetDirectory, "install_path", settings.InstallPath) + verifier, err := newVerifier(version, u.log, &settings) if err != nil { return "", errors.New(err, "initiating verifier") diff --git a/internal/pkg/agent/application/upgrade/step_mark.go b/internal/pkg/agent/application/upgrade/step_mark.go index 66924337699..80bfaab6c44 100644 --- a/internal/pkg/agent/application/upgrade/step_mark.go +++ b/internal/pkg/agent/application/upgrade/step_mark.go @@ -17,6 +17,7 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/agent/errors" "github.com/elastic/elastic-agent/internal/pkg/fleetapi" "github.com/elastic/elastic-agent/internal/pkg/release" + "github.com/elastic/elastic-agent/pkg/core/logger" ) const markerFilename = ".update-marker" @@ -91,7 +92,7 @@ func newMarkerSerializer(m *UpdateMarker) *updateMarkerSerializer { } // markUpgrade marks update happened so we can handle grace period -func (u *Upgrader) markUpgrade(_ context.Context, hash string, action Action) error { +func (u *Upgrader) markUpgrade(_ context.Context, log *logger.Logger, hash string, action Action) error { prevVersion := release.Version() prevHash := release.Commit() if len(prevHash) > hashLen { @@ -112,11 +113,12 @@ func (u *Upgrader) markUpgrade(_ context.Context, hash string, action Action) er } markerPath := markerFilePath() + log.Infow("Writing upgrade marker file", "file.path", markerPath, "hash", marker.Hash, "prev_hash", prevHash) if err := ioutil.WriteFile(markerPath, markerBytes, 0600); err != nil { return errors.New(err, errors.TypeFilesystem, "failed to create update marker file", errors.M(errors.MetaKeyPath, markerPath)) } 
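// [Illustration, not part of this patch] The marker written above is what
// `elastic-agent watch` later loads to choose between keeping the new version
// and rolling back. Its assumed shape, inferred from the surrounding code:
//
//	type UpdateMarker struct {
//		Hash        string    `yaml:"hash"` // commit of the new version
//		UpdatedOn   time.Time `yaml:"updated_on"`
//		PrevVersion string    `yaml:"prev_version"`
//		PrevHash    string    `yaml:"prev_hash"`
//	}
//
// plus bookkeeping for the fleet action that triggered the upgrade.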
- if err := UpdateActiveCommit(hash); err != nil { + if err := UpdateActiveCommit(log, hash); err != nil { return err } @@ -124,8 +126,9 @@ func (u *Upgrader) markUpgrade(_ context.Context, hash string, action Action) er } // UpdateActiveCommit updates active.commit file to point to active version. -func UpdateActiveCommit(hash string) error { +func UpdateActiveCommit(log *logger.Logger, hash string) error { activeCommitPath := filepath.Join(paths.Top(), agentCommitFile) + log.Infow("Updating active commit", "file.path", activeCommitPath, "hash", hash) if err := ioutil.WriteFile(activeCommitPath, []byte(hash), 0600); err != nil { return errors.New(err, errors.TypeFilesystem, "failed to update active commit", errors.M(errors.MetaKeyPath, activeCommitPath)) } @@ -134,8 +137,9 @@ func UpdateActiveCommit(hash string) error { } // CleanMarker removes a marker from disk. -func CleanMarker() error { +func CleanMarker(log *logger.Logger) error { markerFile := markerFilePath() + log.Debugw("Removing marker file", "file.path", markerFile) if err := os.Remove(markerFile); !os.IsNotExist(err) { return err } diff --git a/internal/pkg/agent/application/upgrade/step_relink.go b/internal/pkg/agent/application/upgrade/step_relink.go index 9c998262ecd..e56b5a6642e 100644 --- a/internal/pkg/agent/application/upgrade/step_relink.go +++ b/internal/pkg/agent/application/upgrade/step_relink.go @@ -14,10 +14,11 @@ import ( "github.com/elastic/elastic-agent-libs/file" "github.com/elastic/elastic-agent/internal/pkg/agent/application/paths" "github.com/elastic/elastic-agent/internal/pkg/agent/errors" + "github.com/elastic/elastic-agent/pkg/core/logger" ) // ChangeSymlink updates symlink paths to match current version. -func ChangeSymlink(ctx context.Context, targetHash string) error { +func ChangeSymlink(ctx context.Context, log *logger.Logger, targetHash string) error { // create symlink to elastic-agent-{hash} hashedDir := fmt.Sprintf("%s-%s", agentName, targetHash) @@ -31,6 +32,7 @@ func ChangeSymlink(ctx context.Context, targetHash string) error { } prevNewPath := prevSymlinkPath() + log.Infow("Changing symlink", "symlink_path", symlinkPath, "new_path", newPath, "prev_path", prevNewPath) // remove symlink to avoid upgrade failures if err := os.Remove(prevNewPath); !os.IsNotExist(err) { diff --git a/internal/pkg/agent/application/upgrade/step_unpack.go b/internal/pkg/agent/application/upgrade/step_unpack.go index 108593c5083..4a9538a7e07 100644 --- a/internal/pkg/agent/application/upgrade/step_unpack.go +++ b/internal/pkg/agent/application/upgrade/step_unpack.go @@ -21,6 +21,7 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/agent/application/paths" "github.com/elastic/elastic-agent/internal/pkg/agent/errors" + "github.com/elastic/elastic-agent/pkg/core/logger" ) // unpack unpacks archive correctly, skips root (symlink, config...) 
unpacks data/* @@ -30,18 +31,21 @@ func (u *Upgrader) unpack(ctx context.Context, version, archivePath string) (str var hash string var err error if runtime.GOOS == "windows" { - hash, err = unzip(version, archivePath) + hash, err = unzip(u.log, version, archivePath) } else { - hash, err = untar(version, archivePath) + hash, err = untar(u.log, version, archivePath) } + if err != nil { + u.log.Errorw("Failed to unpack upgrade artifact", "error.message", err, "version", version, "file.path", archivePath, "hash", hash) return "", err } + u.log.Infow("Unpacked upgrade artifact", "version", version, "file.path", archivePath, "hash", hash) return hash, nil } -func unzip(version, archivePath string) (string, error) { +func unzip(log *logger.Logger, version string, archivePath string) (string, error) { var hash, rootDir string r, err := zip.OpenReader(archivePath) if err != nil { @@ -82,8 +86,10 @@ func unzip(version, archivePath string) (string, error) { path := filepath.Join(paths.Data(), strings.TrimPrefix(fileName, "data/")) if f.FileInfo().IsDir() { + log.Debugw("Unpacking directory", "archive", "zip", "file.path", path) os.MkdirAll(path, f.Mode()) } else { + log.Debugw("Unpacking file", "archive", "zip", "file.path", path) os.MkdirAll(filepath.Dir(path), f.Mode()) f, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, f.Mode()) if err != nil { @@ -119,7 +125,7 @@ func unzip(version, archivePath string) (string, error) { return hash, nil } -func untar(version, archivePath string) (string, error) { +func untar(log *logger.Logger, version string, archivePath string) (string, error) { r, err := os.Open(archivePath) if err != nil { return "", errors.New(fmt.Sprintf("artifact for 'elastic-agent' version '%s' could not be found at '%s'", version, archivePath), errors.TypeFilesystem, errors.M(errors.MetaKeyPath, archivePath)) @@ -183,6 +189,7 @@ func untar(version, archivePath string) (string, error) { mode := fi.Mode() switch { case mode.IsRegular(): + log.Debugw("Unpacking file", "archive", "tar", "file.path", abs) // just to be sure, it should already be created by Dir type if err := os.MkdirAll(filepath.Dir(abs), 0755); err != nil { return "", errors.New(err, "TarInstaller: creating directory for file "+abs, errors.TypeFilesystem, errors.M(errors.MetaKeyPath, abs)) @@ -201,6 +208,7 @@ func untar(version, archivePath string) (string, error) { return "", fmt.Errorf("TarInstaller: error writing to %s: %w", abs, err) } case mode.IsDir(): + log.Debugw("Unpacking directory", "archive", "tar", "file.path", abs) if err := os.MkdirAll(abs, 0755); err != nil { return "", errors.New(err, "TarInstaller: creating directory for file "+abs, errors.TypeFilesystem, errors.M(errors.MetaKeyPath, abs)) } diff --git a/internal/pkg/agent/application/upgrade/upgrade.go b/internal/pkg/agent/application/upgrade/upgrade.go index 1c6a85fa9d9..e31c8ef0378 100644 --- a/internal/pkg/agent/application/upgrade/upgrade.go +++ b/internal/pkg/agent/application/upgrade/upgrade.go @@ -108,6 +108,7 @@ func (u *Upgrader) Upgradeable() bool { // Upgrade upgrades running agent, function returns shutdown callback if some needs to be executed for cases when // reexec is called by caller. 
func (u *Upgrader) Upgrade(ctx context.Context, a Action, reexecNow bool) (_ reexec.ShutdownCallbackFn, err error) { + u.log.Infow("Upgrading agent", "version", a.Version(), "source_uri", a.SourceURI()) span, ctx := apm.StartSpan(ctx, "upgrade", "app.internal") defer span.End() // report failed @@ -126,9 +127,9 @@ func (u *Upgrader) Upgrade(ctx context.Context, a Action, reexecNow bool) (_ ree "running under control of the systems supervisor") } - err = preUpgradeCleanup(u.agentInfo.Version()) + err = cleanNonMatchingVersionsFromDownloads(u.log, u.agentInfo.Version()) if err != nil { - u.log.Errorf("Unable to clean downloads dir %q before update: %v", paths.Downloads(), err) + u.log.Errorw("Unable to clean downloads before update", "error.message", err, "downloads.path", paths.Downloads()) } if u.caps != nil { @@ -142,10 +143,10 @@ func (u *Upgrader) Upgrade(ctx context.Context, a Action, reexecNow bool) (_ ree sourceURI := u.sourceURI(a.SourceURI()) archivePath, err := u.downloadArtifact(ctx, a.Version(), sourceURI) if err != nil { - // Run the same preUpgradeCleanup task to get rid of any newly downloaded files + // Run the same pre-upgrade cleanup task to get rid of any newly downloaded files // This may have an issue if users are upgrading to the same version number. - if dErr := preUpgradeCleanup(u.agentInfo.Version()); dErr != nil { - u.log.Errorf("Unable to remove file after verification failure: %v", dErr) + if dErr := cleanNonMatchingVersionsFromDownloads(u.log, u.agentInfo.Version()); dErr != nil { + u.log.Errorw("Unable to remove file after verification failure", "error.message", dErr) } return nil, err } @@ -169,39 +170,47 @@ func (u *Upgrader) Upgrade(ctx context.Context, a Action, reexecNow bool) (_ ree return nil, nil } - if err := copyActionStore(newHash); err != nil { + if err := copyActionStore(u.log, newHash); err != nil { return nil, errors.New(err, "failed to copy action store") } - if err := ChangeSymlink(ctx, newHash); err != nil { - rollbackInstall(ctx, newHash) + if err := ChangeSymlink(ctx, u.log, newHash); err != nil { + u.log.Errorw("Rolling back: changing symlink failed", "error.message", err) + rollbackInstall(ctx, u.log, newHash) return nil, err } - if err := u.markUpgrade(ctx, newHash, a); err != nil { - rollbackInstall(ctx, newHash) + if err := u.markUpgrade(ctx, u.log, newHash, a); err != nil { + u.log.Errorw("Rolling back: marking upgrade failed", "error.message", err) + rollbackInstall(ctx, u.log, newHash) return nil, err } if err := InvokeWatcher(u.log); err != nil { - rollbackInstall(ctx, newHash) + u.log.Errorw("Rolling back: starting watcher failed", "error.message", err) + rollbackInstall(ctx, u.log, newHash) return nil, errors.New("failed to invoke rollback watcher", err) } - cb := shutdownCallback(u.log, paths.Home(), release.Version(), a.Version(), release.TrimCommit(newHash)) + trimmedNewHash := release.TrimCommit(newHash) + cb := shutdownCallback(u.log, paths.Home(), release.Version(), a.Version(), trimmedNewHash) if reexecNow { + u.log.Debugw("Removing downloads directory", "file.path", paths.Downloads(), "rexec", reexecNow) err = os.RemoveAll(paths.Downloads()) if err != nil { - u.log.Errorf("Unable to clean downloads dir %q after update: %v", paths.Downloads(), err) + u.log.Errorw("Unable to clean downloads after update", "error.message", err, "downloads.path", paths.Downloads()) } + u.log.Infow("Restarting after upgrade", "new_version", release.Version(), "prev_version", a.Version(), + "hash", trimmedNewHash, "home", paths.Home()) 
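// [Illustration, not part of this patch] ReExec(cb) below replaces the running
// process with the freshly linked binary; cb is the shutdownCallback built
// above, which, judging from its arguments (old home, previous and new
// version, new hash), migrates run state from the old elastic-agent-<hash>
// home into the new one just before the re-exec. Note also that the
// new_version and prev_version values in the log call above appear to be
// swapped: release.Version() is the currently running (previous) version,
// while a.Version() is the upgrade target.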
u.reexec.ReExec(cb) return nil, nil } // Clean everything from the downloads dir + u.log.Debugw("Removing downloads directory", "file.path", paths.Downloads(), "rexec", reexecNow) err = os.RemoveAll(paths.Downloads()) if err != nil { - u.log.Errorf("Unable to clean downloads dir %q after update: %v", paths.Downloads(), err) + u.log.Errorw("Unable to clean downloads after update", "error.message", err, "file.path", paths.Downloads()) } return cb, nil @@ -283,19 +292,20 @@ func (u *Upgrader) reportUpdating(version string) { ) } -func rollbackInstall(ctx context.Context, hash string) { +func rollbackInstall(ctx context.Context, log *logger.Logger, hash string) { os.RemoveAll(filepath.Join(paths.Data(), fmt.Sprintf("%s-%s", agentName, hash))) - _ = ChangeSymlink(ctx, release.ShortCommit()) + _ = ChangeSymlink(ctx, log, release.ShortCommit()) } -func copyActionStore(newHash string) error { +func copyActionStore(log *logger.Logger, newHash string) error { // copies legacy action_store.yml, state.yml and state.enc encrypted file if exists storePaths := []string{paths.AgentActionStoreFile(), paths.AgentStateStoreYmlFile(), paths.AgentStateStoreFile()} + newHome := filepath.Join(filepath.Dir(paths.Home()), fmt.Sprintf("%s-%s", agentName, newHash)) + log.Debugw("Copying action store", "new_home_path", newHome) for _, currentActionStorePath := range storePaths { - newHome := filepath.Join(filepath.Dir(paths.Home()), fmt.Sprintf("%s-%s", agentName, newHash)) newActionStorePath := filepath.Join(newHome, filepath.Base(currentActionStorePath)) - + log.Debugw("Copying action store path", "from", currentActionStorePath, "to", newActionStorePath) currentActionStore, err := ioutil.ReadFile(currentActionStorePath) if os.IsNotExist(err) { // nothing to copy diff --git a/internal/pkg/agent/cmd/watch.go b/internal/pkg/agent/cmd/watch.go index 64bd604cd85..353017b714e 100644 --- a/internal/pkg/agent/cmd/watch.go +++ b/internal/pkg/agent/cmd/watch.go @@ -15,6 +15,7 @@ import ( "github.com/spf13/cobra" + "github.com/elastic/elastic-agent-libs/logp" "github.com/elastic/elastic-agent/internal/pkg/agent/application/filelock" "github.com/elastic/elastic-agent/internal/pkg/agent/application/paths" "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade" @@ -40,8 +41,13 @@ func newWatchCommandWithArgs(_ []string, streams *cli.IOStreams) *cobra.Command Short: "Watch watches Elastic Agent for failures and initiates rollback.", Long: `Watch watches Elastic Agent for failures and initiates rollback.`, Run: func(_ *cobra.Command, _ []string) { - if err := watchCmd(); err != nil { - fmt.Fprintf(streams.Err, "Error: %v\n%s\n", err, troubleshootMessage()) + log, err := configuredLogger() + if err != nil { + fmt.Fprintf(streams.Err, "Error configuring logger: %v\n%s\n", err, troubleshootMessage()) + } + if err := watchCmd(log); err != nil { + log.Errorw("Watch command failed", "error.message", err) + fmt.Fprintf(streams.Err, "Watch command failed: %v\n%s\n", err, troubleshootMessage()) os.Exit(1) } }, @@ -50,12 +56,7 @@ func newWatchCommandWithArgs(_ []string, streams *cli.IOStreams) *cobra.Command return cmd } -func watchCmd() error { - log, err := configuredLogger() - if err != nil { - return err - } - +func watchCmd(log *logp.Logger) error { marker, err := upgrade.LoadMarker() if err != nil { log.Error("failed to load marker", err) @@ -88,7 +89,7 @@ func watchCmd() error { // if we're not within grace and marker is still there it might mean // that cleanup was not 
performed ok, cleanup everything except current version // hash is the same as hash of agent which initiated watcher. - if err := upgrade.Cleanup(release.ShortCommit(), true); err != nil { + if err := upgrade.Cleanup(log, release.ShortCommit(), true); err != nil { log.Error("rollback failed", err) } // exit nicely @@ -97,8 +98,8 @@ func watchCmd() error { ctx := context.Background() if err := watch(ctx, tilGrace, log); err != nil { - log.Debugf("Error detected proceeding to rollback: %v", err) - err = upgrade.Rollback(ctx, marker.PrevHash, marker.Hash) + log.Error("Error detected proceeding to rollback: %v", err) + err = upgrade.Rollback(ctx, log, marker.PrevHash, marker.Hash) if err != nil { log.Error("rollback failed", err) } @@ -109,7 +110,7 @@ func watchCmd() error { // in windows it might leave self untouched, this will get cleaned up // later at the start, because for windows we leave marker untouched. removeMarker := !isWindows() - err = upgrade.Cleanup(marker.Hash, removeMarker) + err = upgrade.Cleanup(log, marker.Hash, removeMarker) if err != nil { log.Error("rollback failed", err) } diff --git a/internal/pkg/agent/control/server/server.go b/internal/pkg/agent/control/server/server.go index 0b89ccd8f71..7cebc84084c 100644 --- a/internal/pkg/agent/control/server/server.go +++ b/internal/pkg/agent/control/server/server.go @@ -181,6 +181,7 @@ func (s *Server) Upgrade(ctx context.Context, request *proto.UpgradeRequest) (*p } cb, err := u.Upgrade(ctx, &upgradeRequest{request}, false) if err != nil { + s.logger.Errorw("Upgrade failed", "error.message", err, "version", request.Version, "source_uri", request.SourceURI) return &proto.UpgradeResponse{ Status: proto.ActionStatus_FAILURE, Error: err.Error(), @@ -190,6 +191,7 @@ func (s *Server) Upgrade(ctx context.Context, request *proto.UpgradeRequest) (*p // this ensures that the upgrade response over GRPC is returned go func() { <-time.After(time.Second) + s.logger.Info("Restarting after upgrade", "version", request.Version) s.rex.ReExec(cb) }() return &proto.UpgradeResponse{ From 6a0fd1bf6d6066d0f9296ab1fcc1e72f0a5ab1d9 Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Tue, 27 Sep 2022 01:37:16 -0400 Subject: [PATCH 140/180] [Automation] Update elastic stack version to 8.6.0-326f84b0 for testing (#1318) Co-authored-by: apmmachine --- testing/environments/snapshot.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/testing/environments/snapshot.yml b/testing/environments/snapshot.yml index 4e2277d2edf..a27574c766c 100644 --- a/testing/environments/snapshot.yml +++ b/testing/environments/snapshot.yml @@ -3,7 +3,7 @@ version: '2.3' services: elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:8.6.0-21651da3-SNAPSHOT + image: docker.elastic.co/elasticsearch/elasticsearch:8.6.0-326f84b0-SNAPSHOT # When extend is used it merges healthcheck.tests, see: # https://github.com/docker/compose/issues/8962 # healthcheck: @@ -42,7 +42,7 @@ services: - ./docker/logstash/pki:/etc/pki:ro kibana: - image: docker.elastic.co/kibana/kibana:8.6.0-21651da3-SNAPSHOT + image: docker.elastic.co/kibana/kibana:8.6.0-326f84b0-SNAPSHOT environment: - "ELASTICSEARCH_USERNAME=kibana_system_user" - "ELASTICSEARCH_PASSWORD=testing" From faf98e7c8a1c4289cbdc8f80813576e3a2d3206b Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Wed, 28 Sep 2022 01:35:32 -0400 Subject: [PATCH 141/180] [Automation] Update elastic stack version to 
8.6.0-df00693f for testing (#1334) Co-authored-by: apmmachine --- testing/environments/snapshot.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/testing/environments/snapshot.yml b/testing/environments/snapshot.yml index a27574c766c..cd80d94a564 100644 --- a/testing/environments/snapshot.yml +++ b/testing/environments/snapshot.yml @@ -3,7 +3,7 @@ version: '2.3' services: elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:8.6.0-326f84b0-SNAPSHOT + image: docker.elastic.co/elasticsearch/elasticsearch:8.6.0-df00693f-SNAPSHOT # When extend is used it merges healthcheck.tests, see: # https://github.com/docker/compose/issues/8962 # healthcheck: @@ -42,7 +42,7 @@ services: - ./docker/logstash/pki:/etc/pki:ro kibana: - image: docker.elastic.co/kibana/kibana:8.6.0-326f84b0-SNAPSHOT + image: docker.elastic.co/kibana/kibana:8.6.0-df00693f-SNAPSHOT environment: - "ELASTICSEARCH_USERNAME=kibana_system_user" - "ELASTICSEARCH_PASSWORD=testing" From e6143213c1cbc69fd83aac31b1c2730b5f670fe2 Mon Sep 17 00:00:00 2001 From: Josh Dover <1813008+joshdover@users.noreply.github.com> Date: Wed, 28 Sep 2022 12:22:34 +0200 Subject: [PATCH 142/180] Add success log message after previous checkin failures (#1327) --- CHANGELOG.next.asciidoc | 1 + .../application/gateway/fleet/fleet_gateway.go | 16 +++++++++++----- 2 files changed, 12 insertions(+), 5 deletions(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index faef2861ba9..7d500cf8f53 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -119,6 +119,7 @@ - Remove fleet event reporter and events from checkin body. {issue}993[993] - Fix unintended reset of source URI when downloading components {pull}1252[1252] - Create separate status reporter for local only events so that degraded fleet-checkins no longer affect health on successful fleet-checkins. {issue}1157[1157] {pull}1285[1285] +- Add success log message after previous checkin failures {pull}1327[1327] ==== New features diff --git a/internal/pkg/agent/application/gateway/fleet/fleet_gateway.go b/internal/pkg/agent/application/gateway/fleet/fleet_gateway.go index 6df9f171fbe..b88a0cafee0 100644 --- a/internal/pkg/agent/application/gateway/fleet/fleet_gateway.go +++ b/internal/pkg/agent/application/gateway/fleet/fleet_gateway.go @@ -210,7 +210,7 @@ func (f *fleetGateway) worker() { f.statusReporter.Update(state.Failed, errMsg, nil) } else { f.statusReporter.Update(state.Healthy, "", nil) - f.localReporter.Update(state.Healthy, "", nil) // we don't need to specifically set the local reporter to failed above, but it needs to be reset to healthy if a checking succeeds + f.localReporter.Update(state.Healthy, "", nil) // we don't need to specifically set the local reporter to failed above, but it needs to be reset to healthy if a checkin succeeds } case <-f.bgContext.Done(): @@ -280,11 +280,11 @@ func (f *fleetGateway) doExecute() (*fleetapi.CheckinResponse, error) { // Guard if the context is stopped by a out of bound call, // this mean we are rebooting to change the log level or the system is shutting us down. 
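// [Illustration, not part of this patch] f.backoff in the loop below is
// assumed to implement a blocking backoff along the lines of
//
//	type Backoff interface {
//		Wait() bool // sleep for the next interval; false once shutting down
//	}
//
// which is why a false return is treated as a hard stop with an error status
// rather than as a signal to retry immediately.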
for f.bgContext.Err() == nil { - f.log.Debugf("Checking started") + f.log.Debugf("Checkin started") resp, err := f.execute(f.bgContext) if err != nil { f.checkinFailCounter++ - f.log.Errorf("Could not communicate with fleet-server Checking API will retry, error: %s", err) + f.log.Errorf("Could not communicate with fleet-server checkin API will retry, error: %s", err) if !f.backoff.Wait() { // Something bad has happened and we log it and we should update our current state. err := errors.New( @@ -299,10 +299,16 @@ func (f *fleetGateway) doExecute() (*fleetapi.CheckinResponse, error) { } if f.checkinFailCounter > 1 { f.localReporter.Update(state.Degraded, fmt.Sprintf("checkin failed: %v", err), nil) - f.log.Errorf("checking number %d failed: %s", f.checkinFailCounter, err.Error()) + f.log.Errorf("checkin number %d failed: %s", f.checkinFailCounter, err.Error()) } continue } + + if f.checkinFailCounter > 0 { + // Log at same level as error logs above so subsequent successes are visible when log level is set to 'error'. + f.log.Errorf("Checkin request to fleet-server succeeded after %d failures", f.checkinFailCounter) + } + f.checkinFailCounter = 0 // Request was successful, return the collected actions. return resp, nil @@ -338,7 +344,7 @@ func (f *fleetGateway) execute(ctx context.Context) (*fleetapi.CheckinResponse, f.unauthCounter++ if f.shouldUnenroll() { - f.log.Warnf("retrieved an invalid api key error '%d' times. Starting to unenroll the elastic agent.", f.unauthCounter) + f.log.Warnf("received an invalid api key error '%d' times. Starting to unenroll the elastic agent.", f.unauthCounter) return &fleetapi.CheckinResponse{ Actions: []fleetapi.Action{&fleetapi.ActionUnenroll{ActionID: "", ActionType: "UNENROLL", IsDetected: true}}, }, nil From 177b5fbb92bead26bef2492cdc69871ef518ae69 Mon Sep 17 00:00:00 2001 From: Anderson Queiroz Date: Wed, 28 Sep 2022 13:51:25 +0200 Subject: [PATCH 143/180] Fix status reporter initialization (#1341) --- internal/pkg/core/status/reporter.go | 13 +++++++------ internal/pkg/core/status/reporter_test.go | 18 ++++++++++++++++++ 2 files changed, 25 insertions(+), 6 deletions(-) diff --git a/internal/pkg/core/status/reporter.go b/internal/pkg/core/status/reporter.go index 50f34651fa1..e123beab2a2 100644 --- a/internal/pkg/core/status/reporter.go +++ b/internal/pkg/core/status/reporter.go @@ -88,10 +88,11 @@ type controller struct { // NewController creates a new reporter. func NewController(log *logger.Logger) Controller { return &controller{ - status: Healthy, - reporters: make(map[string]*reporter), - appReporters: make(map[string]*reporter), - log: log, + status: Healthy, + reporters: make(map[string]*reporter), + localReporters: make(map[string]*reporter), + appReporters: make(map[string]*reporter), + log: log, } } @@ -154,12 +155,12 @@ func (r *controller) RegisterLocalComponent(componentIdentifier string) Reporter return rep } -// Register registers new component for status updates. +// RegisterComponent registers new component for status updates. func (r *controller) RegisterComponent(componentIdentifier string) Reporter { return r.RegisterComponentWithPersistance(componentIdentifier, false) } -// Register registers new component for status updates. +// RegisterComponentWithPersistance registers new component for status updates. 
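// [Illustration, not part of this patch] The localReporters initialization
// added to NewController above matters because writing to a nil map panics in
// Go:
//
//	var m map[string]*reporter
//	m["x"] = &reporter{} // panic: assignment to entry in nil map
//
// so RegisterLocalComponent would have crashed the first time it stored a
// reporter, which is what the new test below guards against.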
func (r *controller) RegisterComponentWithPersistance(componentIdentifier string, persistent bool) Reporter { id := componentIdentifier + "-" + uuid.New().String()[:8] rep := &reporter{ diff --git a/internal/pkg/core/status/reporter_test.go b/internal/pkg/core/status/reporter_test.go index 09a66661fc5..c4f6796fb30 100644 --- a/internal/pkg/core/status/reporter_test.go +++ b/internal/pkg/core/status/reporter_test.go @@ -14,6 +14,24 @@ import ( "github.com/elastic/elastic-agent/pkg/core/logger" ) +func TestNewController_ensure_all_is_initialzed(t *testing.T) { + l, _ := logger.New("", false) + + newController := NewController(l) + + c, ok := newController.(*controller) + if !ok { + t.Fatalf("expected c %T, not c %T", controller{}, newController) + } + + c.reporters["ignore"] = &reporter{} + c.localReporters["ignore"] = &reporter{} + c.appReporters["ignore"] = &reporter{} + if c.log == nil { + t.Error("logger shouldn't be nil, it was not correctly assigned") + } +} + func TestReporter(t *testing.T) { l, _ := logger.New("", false) t.Run("healthy by default", func(t *testing.T) { From c35935d8d66e7755b475a5938630b1e7b1b0bdfe Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Thu, 29 Sep 2022 01:37:08 -0400 Subject: [PATCH 144/180] [Automation] Update elastic stack version to 8.6.0-a2f4f140 for testing (#1362) Co-authored-by: apmmachine --- testing/environments/snapshot.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/testing/environments/snapshot.yml b/testing/environments/snapshot.yml index cd80d94a564..084b8b035c4 100644 --- a/testing/environments/snapshot.yml +++ b/testing/environments/snapshot.yml @@ -3,7 +3,7 @@ version: '2.3' services: elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:8.6.0-df00693f-SNAPSHOT + image: docker.elastic.co/elasticsearch/elasticsearch:8.6.0-a2f4f140-SNAPSHOT # When extend is used it merges healthcheck.tests, see: # https://github.com/docker/compose/issues/8962 # healthcheck: @@ -42,7 +42,7 @@ services: - ./docker/logstash/pki:/etc/pki:ro kibana: - image: docker.elastic.co/kibana/kibana:8.6.0-df00693f-SNAPSHOT + image: docker.elastic.co/kibana/kibana:8.6.0-a2f4f140-SNAPSHOT environment: - "ELASTICSEARCH_USERNAME=kibana_system_user" - "ELASTICSEARCH_PASSWORD=testing" From 4d7fae9f3c82222ac342733b6c7c1f91beed731e Mon Sep 17 00:00:00 2001 From: Julia Bardi <90178898+juliaElastic@users.noreply.github.com> Date: Thu, 29 Sep 2022 14:10:54 +0200 Subject: [PATCH 145/180] Added status message to CheckinRequest (#1369) * Added status message to CheckinRequest * added changelog * updated test * added omitempty --- CHANGELOG.next.asciidoc | 1 + internal/pkg/agent/application/gateway/fleet/fleet_gateway.go | 1 + .../pkg/agent/application/gateway/fleet/fleet_gateway_test.go | 2 ++ internal/pkg/fleetapi/checkin_cmd.go | 1 + 4 files changed, 5 insertions(+) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 7d500cf8f53..2ce614336a4 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -208,3 +208,4 @@ - Add `lumberjack` input type to the Filebeat spec. {pull}[959] - Add support for hints' based autodiscovery in kubernetes provider. {pull}[698] - Improve logging during upgrades. 
{pull}[1287] +- Added status message to CheckinRequest {pull}[1369] diff --git a/internal/pkg/agent/application/gateway/fleet/fleet_gateway.go b/internal/pkg/agent/application/gateway/fleet/fleet_gateway.go index b88a0cafee0..897b81ea0d3 100644 --- a/internal/pkg/agent/application/gateway/fleet/fleet_gateway.go +++ b/internal/pkg/agent/application/gateway/fleet/fleet_gateway.go @@ -337,6 +337,7 @@ func (f *fleetGateway) execute(ctx context.Context) (*fleetapi.CheckinResponse, AckToken: ackToken, Metadata: ecsMeta, Status: f.statusController.StatusString(), + Message: f.statusController.Status().Message, } resp, err := cmd.Execute(ctx, req) diff --git a/internal/pkg/agent/application/gateway/fleet/fleet_gateway_test.go b/internal/pkg/agent/application/gateway/fleet/fleet_gateway_test.go index 2d691185c1c..0cc00e739a8 100644 --- a/internal/pkg/agent/application/gateway/fleet/fleet_gateway_test.go +++ b/internal/pkg/agent/application/gateway/fleet/fleet_gateway_test.go @@ -26,6 +26,7 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/agent/storage" "github.com/elastic/elastic-agent/internal/pkg/agent/storage/store" "github.com/elastic/elastic-agent/internal/pkg/core/state" + "github.com/elastic/elastic-agent/internal/pkg/core/status" "github.com/elastic/elastic-agent/internal/pkg/fleetapi" noopacker "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker/noop" "github.com/elastic/elastic-agent/internal/pkg/scheduler" @@ -717,6 +718,7 @@ func TestRetriesOnFailures(t *testing.T) { statusController.On("RegisterComponent", "gateway").Return(fleetReporter).Once() statusController.On("RegisterLocalComponent", "gateway-checkin").Return(localReporter).Once() statusController.On("StatusString").Return("string") + statusController.On("Status").Return(status.AgentStatus{Message: "message"}) gateway, err := newFleetGatewayWithScheduler( ctx, diff --git a/internal/pkg/fleetapi/checkin_cmd.go b/internal/pkg/fleetapi/checkin_cmd.go index e225aababb9..d6a63a45e29 100644 --- a/internal/pkg/fleetapi/checkin_cmd.go +++ b/internal/pkg/fleetapi/checkin_cmd.go @@ -23,6 +23,7 @@ const checkingPath = "/api/fleet/agents/%s/checkin" // CheckinRequest consists of multiple events reported to fleet ui. 
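// [Illustration, not part of this patch] With the omitempty option, the new
// Message field is dropped from the serialized checkin body whenever it is
// empty:
//
//	json.Marshal(CheckinRequest{Status: "online"})
//	// -> {"status":"online"}
//	json.Marshal(CheckinRequest{Status: "degraded", Message: "checkin failed"})
//	// -> {"status":"degraded","message":"checkin failed"}
//
// so checkins without a message keep exactly their previous wire format.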
type CheckinRequest struct { Status string `json:"status"` + Message string `json:"message,omitempty"` AckToken string `json:"ack_token,omitempty"` Metadata *info.ECSMeta `json:"local_metadata,omitempty"` } From e184051717ba412a78e75994e647738229120365 Mon Sep 17 00:00:00 2001 From: Michal Pristas Date: Thu, 29 Sep 2022 16:08:12 +0200 Subject: [PATCH 146/180] Fix failures when using npipe monitoring endpoints (#1371) --- internal/pkg/core/monitoring/beats/sidecar_monitor.go | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/internal/pkg/core/monitoring/beats/sidecar_monitor.go b/internal/pkg/core/monitoring/beats/sidecar_monitor.go index aa249bafa0f..c5d45c1c82d 100644 --- a/internal/pkg/core/monitoring/beats/sidecar_monitor.go +++ b/internal/pkg/core/monitoring/beats/sidecar_monitor.go @@ -88,6 +88,10 @@ func (b *SidecarMonitor) EnrichArgs(spec program.Spec, pipelineID string, args [ func (b *SidecarMonitor) Cleanup(spec program.Spec, pipelineID string) error { endpoint := MonitoringEndpoint(spec, b.operatingSystem, pipelineID, true) drop := monitoringDrop(endpoint) + if drop == "" { + // not exposed using sockets + return nil + } return os.RemoveAll(drop) } @@ -104,6 +108,11 @@ func (b *SidecarMonitor) Prepare(spec program.Spec, pipelineID string, uid, gid endpoint := MonitoringEndpoint(spec, b.operatingSystem, pipelineID, true) drop := monitoringDrop(endpoint) + if drop == "" { + // not exposed using sockets + return nil + } + if err := os.MkdirAll(drop, 0775); err != nil { return errors.New(err, fmt.Sprintf("failed to create a directory %q", drop)) } From c6a22d4b165dd72167e90aea7e548c39cdcf9c63 Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Fri, 30 Sep 2022 01:36:59 -0400 Subject: [PATCH 147/180] [Automation] Update elastic stack version to 8.6.0-158a13db for testing (#1379) Co-authored-by: apmmachine --- testing/environments/snapshot.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/testing/environments/snapshot.yml b/testing/environments/snapshot.yml index 084b8b035c4..bd04ca78f28 100644 --- a/testing/environments/snapshot.yml +++ b/testing/environments/snapshot.yml @@ -3,7 +3,7 @@ version: '2.3' services: elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:8.6.0-a2f4f140-SNAPSHOT + image: docker.elastic.co/elasticsearch/elasticsearch:8.6.0-158a13db-SNAPSHOT # When extend is used it merges healthcheck.tests, see: # https://github.com/docker/compose/issues/8962 # healthcheck: @@ -42,7 +42,7 @@ services: - ./docker/logstash/pki:/etc/pki:ro kibana: - image: docker.elastic.co/kibana/kibana:8.6.0-a2f4f140-SNAPSHOT + image: docker.elastic.co/kibana/kibana:8.6.0-158a13db-SNAPSHOT environment: - "ELASTICSEARCH_USERNAME=kibana_system_user" - "ELASTICSEARCH_PASSWORD=testing" From 90c2c00beaf50c9ab11d048188bc210905c4cc1d Mon Sep 17 00:00:00 2001 From: Yash Tewari Date: Mon, 3 Oct 2022 12:24:43 +0530 Subject: [PATCH 148/180] Mount /etc directory in Kubernetes DaemonSet manifests. (#1382) Changes made to files like `/etc/passwd` using Linux tools like `useradd` are not reflected in the mounted file on the Agent, because the tool replaces the file instead of changing it in-place. Mounting the parent directory solves this problem. 
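For illustration only (not part of this patch): tools such as `useradd` typically rewrite `/etc/passwd` by writing a temporary file and renaming it into place, which replaces the inode that a single-file bind mount stays pinned to. The standalone Go sketch below, using hypothetical local file names, reproduces that effect: an open handle (standing in for the per-file mount) keeps seeing the pre-rename content, while resolving the path again (as a mount of the parent directory does) sees the update.

package main

import (
	"fmt"
	"os"
)

func main() {
	// Create a "passwd" file and hold it open, the way a single-file
	// bind mount pins one specific inode.
	if err := os.WriteFile("passwd", []byte("root:x:0:0\n"), 0o644); err != nil {
		panic(err)
	}
	pinned, err := os.Open("passwd")
	if err != nil {
		panic(err)
	}
	defer pinned.Close()

	// "Edit" the file the way useradd does: write a replacement and
	// rename it over the original, swapping the inode.
	_ = os.WriteFile("passwd.tmp", []byte("root:x:0:0\nagent:x:1000:1000\n"), 0o644)
	_ = os.Rename("passwd.tmp", "passwd")

	// The pinned handle still reads the old inode's content...
	buf := make([]byte, 128)
	n, _ := pinned.Read(buf)
	fmt.Printf("pinned handle sees: %q\n", buf[:n])

	// ...while a fresh path lookup sees the replacement.
	fresh, _ := os.ReadFile("passwd")
	fmt.Printf("fresh lookup sees:  %q\n", fresh)
}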
--- .../elastic-agent-managed-kubernetes.yaml | 34 ++++--------------- .../elastic-agent-managed-daemonset.yaml | 34 ++++--------------- .../elastic-agent-standalone-kubernetes.yaml | 34 ++++--------------- .../elastic-agent-standalone-daemonset.yaml | 34 ++++--------------- 4 files changed, 28 insertions(+), 108 deletions(-) diff --git a/deploy/kubernetes/elastic-agent-managed-kubernetes.yaml b/deploy/kubernetes/elastic-agent-managed-kubernetes.yaml index acb8f8d5ea2..0f7bf79f107 100644 --- a/deploy/kubernetes/elastic-agent-managed-kubernetes.yaml +++ b/deploy/kubernetes/elastic-agent-managed-kubernetes.yaml @@ -83,21 +83,12 @@ spec: - name: varlog mountPath: /var/log readOnly: true - - name: etc-kubernetes - mountPath: /hostfs/etc/kubernetes + - name: etc-full + mountPath: /hostfs/etc readOnly: true - name: var-lib mountPath: /hostfs/var/lib readOnly: true - - name: passwd - mountPath: /hostfs/etc/passwd - readOnly: true - - name: group - mountPath: /hostfs/etc/group - readOnly: true - - name: etcsysmd - mountPath: /hostfs/etc/systemd - readOnly: true - name: etc-mid mountPath: /etc/machine-id readOnly: true @@ -114,26 +105,15 @@ spec: - name: varlog hostPath: path: /var/log - # Needed for cloudbeat - - name: etc-kubernetes + # The following volumes are needed for Cloud Security Posture integration (cloudbeat) + # If you are not using this integration, then these volumes and the corresponding + # mounts can be removed. + - name: etc-full hostPath: - path: /etc/kubernetes - # Needed for cloudbeat + path: /etc - name: var-lib hostPath: path: /var/lib - # Needed for cloudbeat - - name: passwd - hostPath: - path: /etc/passwd - # Needed for cloudbeat - - name: group - hostPath: - path: /etc/group - # Needed for cloudbeat - - name: etcsysmd - hostPath: - path: /etc/systemd # Mount /etc/machine-id from the host to determine host ID # Needed for Elastic Security integration - name: etc-mid diff --git a/deploy/kubernetes/elastic-agent-managed/elastic-agent-managed-daemonset.yaml b/deploy/kubernetes/elastic-agent-managed/elastic-agent-managed-daemonset.yaml index 878b15b8a6e..17959a4febe 100644 --- a/deploy/kubernetes/elastic-agent-managed/elastic-agent-managed-daemonset.yaml +++ b/deploy/kubernetes/elastic-agent-managed/elastic-agent-managed-daemonset.yaml @@ -83,21 +83,12 @@ spec: - name: varlog mountPath: /var/log readOnly: true - - name: etc-kubernetes - mountPath: /hostfs/etc/kubernetes + - name: etc-full + mountPath: /hostfs/etc readOnly: true - name: var-lib mountPath: /hostfs/var/lib readOnly: true - - name: passwd - mountPath: /hostfs/etc/passwd - readOnly: true - - name: group - mountPath: /hostfs/etc/group - readOnly: true - - name: etcsysmd - mountPath: /hostfs/etc/systemd - readOnly: true - name: etc-mid mountPath: /etc/machine-id readOnly: true @@ -114,26 +105,15 @@ spec: - name: varlog hostPath: path: /var/log - # Needed for cloudbeat - - name: etc-kubernetes + # The following volumes are needed for Cloud Security Posture integration (cloudbeat) + # If you are not using this integration, then these volumes and the corresponding + # mounts can be removed. 
+ - name: etc-full hostPath: - path: /etc/kubernetes - # Needed for cloudbeat + path: /etc - name: var-lib hostPath: path: /var/lib - # Needed for cloudbeat - - name: passwd - hostPath: - path: /etc/passwd - # Needed for cloudbeat - - name: group - hostPath: - path: /etc/group - # Needed for cloudbeat - - name: etcsysmd - hostPath: - path: /etc/systemd # Mount /etc/machine-id from the host to determine host ID # Needed for Elastic Security integration - name: etc-mid diff --git a/deploy/kubernetes/elastic-agent-standalone-kubernetes.yaml b/deploy/kubernetes/elastic-agent-standalone-kubernetes.yaml index d6ce952dadd..e43a251408f 100644 --- a/deploy/kubernetes/elastic-agent-standalone-kubernetes.yaml +++ b/deploy/kubernetes/elastic-agent-standalone-kubernetes.yaml @@ -722,21 +722,12 @@ spec: - name: varlog mountPath: /var/log readOnly: true - - name: etc-kubernetes - mountPath: /hostfs/etc/kubernetes + - name: etc-full + mountPath: /hostfs/etc readOnly: true - name: var-lib mountPath: /hostfs/var/lib readOnly: true - - name: passwd - mountPath: /hostfs/etc/passwd - readOnly: true - - name: group - mountPath: /hostfs/etc/group - readOnly: true - - name: etcsysmd - mountPath: /hostfs/etc/systemd - readOnly: true volumes: - name: datastreams configMap: @@ -757,26 +748,15 @@ spec: - name: varlog hostPath: path: /var/log - # Needed for cloudbeat - - name: etc-kubernetes + # The following volumes are needed for Cloud Security Posture integration (cloudbeat) + # If you are not using this integration, then these volumes and the corresponding + # mounts can be removed. + - name: etc-full hostPath: - path: /etc/kubernetes - # Needed for cloudbeat + path: /etc - name: var-lib hostPath: path: /var/lib - # Needed for cloudbeat - - name: passwd - hostPath: - path: /etc/passwd - # Needed for cloudbeat - - name: group - hostPath: - path: /etc/group - # Needed for cloudbeat - - name: etcsysmd - hostPath: - path: /etc/systemd --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding diff --git a/deploy/kubernetes/elastic-agent-standalone/elastic-agent-standalone-daemonset.yaml b/deploy/kubernetes/elastic-agent-standalone/elastic-agent-standalone-daemonset.yaml index 675c68c6dfb..9d865811e46 100644 --- a/deploy/kubernetes/elastic-agent-standalone/elastic-agent-standalone-daemonset.yaml +++ b/deploy/kubernetes/elastic-agent-standalone/elastic-agent-standalone-daemonset.yaml @@ -94,21 +94,12 @@ spec: - name: varlog mountPath: /var/log readOnly: true - - name: etc-kubernetes - mountPath: /hostfs/etc/kubernetes + - name: etc-full + mountPath: /hostfs/etc readOnly: true - name: var-lib mountPath: /hostfs/var/lib readOnly: true - - name: passwd - mountPath: /hostfs/etc/passwd - readOnly: true - - name: group - mountPath: /hostfs/etc/group - readOnly: true - - name: etcsysmd - mountPath: /hostfs/etc/systemd - readOnly: true volumes: - name: datastreams configMap: @@ -129,23 +120,12 @@ spec: - name: varlog hostPath: path: /var/log - # Needed for cloudbeat - - name: etc-kubernetes + # The following volumes are needed for Cloud Security Posture integration (cloudbeat) + # If you are not using this integration, then these volumes and the corresponding + # mounts can be removed. 
+ - name: etc-full hostPath: - path: /etc/kubernetes - # Needed for cloudbeat + path: /etc - name: var-lib hostPath: path: /var/lib - # Needed for cloudbeat - - name: passwd - hostPath: - path: /etc/passwd - # Needed for cloudbeat - - name: group - hostPath: - path: /etc/group - # Needed for cloudbeat - - name: etcsysmd - hostPath: - path: /etc/systemd From 6d4087597dbf0f8a17988688140ec8cc3bcc1e78 Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Tue, 4 Oct 2022 01:39:28 -0400 Subject: [PATCH 149/180] [Automation] Update elastic stack version to 8.6.0-aea1c645 for testing (#1405) Co-authored-by: apmmachine --- testing/environments/snapshot.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/testing/environments/snapshot.yml b/testing/environments/snapshot.yml index bd04ca78f28..04187c29869 100644 --- a/testing/environments/snapshot.yml +++ b/testing/environments/snapshot.yml @@ -3,7 +3,7 @@ version: '2.3' services: elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:8.6.0-158a13db-SNAPSHOT + image: docker.elastic.co/elasticsearch/elasticsearch:8.6.0-aea1c645-SNAPSHOT # When extend is used it merges healthcheck.tests, see: # https://github.com/docker/compose/issues/8962 # healthcheck: @@ -42,7 +42,7 @@ services: - ./docker/logstash/pki:/etc/pki:ro kibana: - image: docker.elastic.co/kibana/kibana:8.6.0-158a13db-SNAPSHOT + image: docker.elastic.co/kibana/kibana:8.6.0-aea1c645-SNAPSHOT environment: - "ELASTICSEARCH_USERNAME=kibana_system_user" - "ELASTICSEARCH_PASSWORD=testing" From 7155492418e499f5bc854b9edc88300dbab1eb07 Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Wed, 5 Oct 2022 01:38:22 -0400 Subject: [PATCH 150/180] [Automation] Update elastic stack version to 8.6.0-0fca2953 for testing (#1412) Co-authored-by: apmmachine --- testing/environments/snapshot.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/testing/environments/snapshot.yml b/testing/environments/snapshot.yml index 04187c29869..29ca952ce45 100644 --- a/testing/environments/snapshot.yml +++ b/testing/environments/snapshot.yml @@ -3,7 +3,7 @@ version: '2.3' services: elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:8.6.0-aea1c645-SNAPSHOT + image: docker.elastic.co/elasticsearch/elasticsearch:8.6.0-0fca2953-SNAPSHOT # When extend is used it merges healthcheck.tests, see: # https://github.com/docker/compose/issues/8962 # healthcheck: @@ -42,7 +42,7 @@ services: - ./docker/logstash/pki:/etc/pki:ro kibana: - image: docker.elastic.co/kibana/kibana:8.6.0-aea1c645-SNAPSHOT + image: docker.elastic.co/kibana/kibana:8.6.0-0fca2953-SNAPSHOT environment: - "ELASTICSEARCH_USERNAME=kibana_system_user" - "ELASTICSEARCH_PASSWORD=testing" From f4b8e2ece0b5a9451fa2c7a5789fda0ea4abfca5 Mon Sep 17 00:00:00 2001 From: Victor Martinez Date: Wed, 5 Oct 2022 14:58:38 +0100 Subject: [PATCH 151/180] ci: 7.17 is not available for the daily run (#1417) --- .ci/schedule-daily.groovy | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.ci/schedule-daily.groovy b/.ci/schedule-daily.groovy index 5c1d7134858..adc1ec0f02e 100644 --- a/.ci/schedule-daily.groovy +++ b/.ci/schedule-daily.groovy @@ -20,7 +20,7 @@ pipeline { stages { stage('Nighly beats builds') { steps { - runBuilds(quietPeriodFactor: 2000, branches: ['main', '8.', '8.', '7.']) + runBuilds(quietPeriodFactor: 2000, branches: ['main', '8.', '8.']) } } } From 
166e7f69c231abd1e5f4f6bb74b0abf865784e49 Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Thu, 6 Oct 2022 01:38:23 -0400 Subject: [PATCH 152/180] [Automation] Update elastic stack version to 8.6.0-e4c15f15 for testing (#1425) Co-authored-by: apmmachine --- testing/environments/snapshot.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/testing/environments/snapshot.yml b/testing/environments/snapshot.yml index 29ca952ce45..a4101c6a007 100644 --- a/testing/environments/snapshot.yml +++ b/testing/environments/snapshot.yml @@ -3,7 +3,7 @@ version: '2.3' services: elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:8.6.0-0fca2953-SNAPSHOT + image: docker.elastic.co/elasticsearch/elasticsearch:8.6.0-e4c15f15-SNAPSHOT # When extend is used it merges healthcheck.tests, see: # https://github.com/docker/compose/issues/8962 # healthcheck: @@ -42,7 +42,7 @@ services: - ./docker/logstash/pki:/etc/pki:ro kibana: - image: docker.elastic.co/kibana/kibana:8.6.0-0fca2953-SNAPSHOT + image: docker.elastic.co/kibana/kibana:8.6.0-e4c15f15-SNAPSHOT environment: - "ELASTICSEARCH_USERNAME=kibana_system_user" - "ELASTICSEARCH_PASSWORD=testing" From bd06f46da995a70e8d2d8809f3c14ad0a51169f9 Mon Sep 17 00:00:00 2001 From: Aleksandr Maus Date: Thu, 6 Oct 2022 06:50:42 -0400 Subject: [PATCH 153/180] [backport main] Fix: Agent failed to upgrade from 8.4.2 to 8.5.0 BC1 for MAC 12 agent using agent binary. (#1401) [backport main] Fix: Agent failed to upgrade from 8.4.2 to 8.5.0 BC1 for MAC 12 agent using agent binary. (#1401) --- dev-tools/packaging/packages.yml | 3 + .../templates/darwin/elastic-agent.tmpl | 11 +++ internal/pkg/agent/application/info/state.go | 25 +---- .../pkg/agent/application/info/state_test.go | 53 ----------- .../pkg/agent/application/paths/common.go | 49 +++++++--- .../agent/application/paths/common_test.go | 92 +++++++++++++++++++ .../application/upgrade/service_darwin.go | 11 +-- .../agent/application/upgrade/step_relink.go | 17 +++- .../agent/application/upgrade/step_unpack.go | 20 ++-- .../pkg/agent/application/upgrade/upgrade.go | 7 +- internal/pkg/agent/install/install.go | 19 ++-- 11 files changed, 178 insertions(+), 129 deletions(-) create mode 100644 dev-tools/packaging/templates/darwin/elastic-agent.tmpl delete mode 100644 internal/pkg/agent/application/info/state_test.go create mode 100644 internal/pkg/agent/application/paths/common_test.go diff --git a/dev-tools/packaging/packages.yml b/dev-tools/packaging/packages.yml index 860e86e97a7..d2e8df06e4f 100644 --- a/dev-tools/packaging/packages.yml +++ b/dev-tools/packaging/packages.yml @@ -1089,6 +1089,9 @@ specs: <<: *agent_darwin_binary_spec <<: *elastic_license_for_binaries files: + 'data/{{.BeatName}}-{{ commit_short }}/elastic-agent': + template: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/darwin/elastic-agent.tmpl' + mode: 0755 '{{.BeatName}}{{.BinaryExt}}': source: data/{{.BeatName}}-{{ commit_short }}/elastic-agent.app/Contents/MacOS/{{.BeatName}}{{.BinaryExt}} symlink: true diff --git a/dev-tools/packaging/templates/darwin/elastic-agent.tmpl b/dev-tools/packaging/templates/darwin/elastic-agent.tmpl new file mode 100644 index 00000000000..74c0f238c28 --- /dev/null +++ b/dev-tools/packaging/templates/darwin/elastic-agent.tmpl @@ -0,0 +1,11 @@ +#!/bin/sh +# Fix up the symlink and exit + +set -e + +symlink="/Library/Elastic/Agent/elastic-agent" + +if test -L "$symlink"; then + ln -sfn "data/elastic-agent-{{ commit_short 
}}/elastic-agent.app/Contents/MacOS/elastic-agent" "$symlink" +fi + diff --git a/internal/pkg/agent/application/info/state.go b/internal/pkg/agent/application/info/state.go index e00948fab58..b9d73504d06 100644 --- a/internal/pkg/agent/application/info/state.go +++ b/internal/pkg/agent/application/info/state.go @@ -5,14 +5,11 @@ package info import ( - "fmt" "os" "path/filepath" "runtime" - "strings" "github.com/elastic/elastic-agent/internal/pkg/agent/application/paths" - "github.com/elastic/elastic-agent/internal/pkg/release" ) const ( @@ -31,17 +28,8 @@ func RunningInstalled() bool { } execPath, _ := os.Executable() execPath, _ = filepath.Abs(execPath) - execName := filepath.Base(execPath) - execDir := filepath.Dir(execPath) - if IsInsideData(execDir) { - // executable path is being reported as being down inside of data path - // move up to directories to perform the comparison - execDir = filepath.Dir(filepath.Dir(execDir)) - if runtime.GOOS == darwin { - execDir = filepath.Dir(filepath.Dir(filepath.Dir(execDir))) - } - execPath = filepath.Join(execDir, execName) - } + + execPath = filepath.Join(paths.ExecDir(filepath.Dir(execPath)), filepath.Base(execPath)) for _, expected := range expectedPaths { if paths.ArePathsEqual(expected, execPath) { return true @@ -49,12 +37,3 @@ func RunningInstalled() bool { } return false } - -// IsInsideData returns true when the exePath is inside of the current Agents data path. -func IsInsideData(exePath string) bool { - expectedPath := filepath.Join("data", fmt.Sprintf("elastic-agent-%s", release.ShortCommit())) - if runtime.GOOS == darwin { - expectedPath = filepath.Join(expectedPath, "elastic-agent.app", "Contents", "MacOS") - } - return strings.HasSuffix(exePath, expectedPath) -} diff --git a/internal/pkg/agent/application/info/state_test.go b/internal/pkg/agent/application/info/state_test.go deleted file mode 100644 index 39f5b7e9738..00000000000 --- a/internal/pkg/agent/application/info/state_test.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. 
- -package info - -import ( - "fmt" - "path/filepath" - "runtime" - "testing" - - "github.com/elastic/elastic-agent/internal/pkg/release" - "github.com/google/go-cmp/cmp" -) - -func TestIsInsideData(t *testing.T) { - - validExePath := filepath.Join("data", fmt.Sprintf("elastic-agent-%s", release.ShortCommit())) - - if runtime.GOOS == darwin { - validExePath = filepath.Join(validExePath, "elastic-agent.app", "Contents", "MacOS") - } - - tests := []struct { - name string - exePath string - res bool - }{ - { - name: "empty", - }, - { - name: "invalid", - exePath: "data/elastic-agent", - }, - { - name: "valid", - exePath: validExePath, - res: true, - }, - } - - for _, tc := range tests { - t.Run(tc.name, func(t *testing.T) { - res := IsInsideData(tc.exePath) - diff := cmp.Diff(tc.res, res) - if diff != "" { - t.Error(diff) - } - }) - } -} diff --git a/internal/pkg/agent/application/paths/common.go b/internal/pkg/agent/application/paths/common.go index 3bebe122154..8bf37d36c88 100644 --- a/internal/pkg/agent/application/paths/common.go +++ b/internal/pkg/agent/application/paths/common.go @@ -177,21 +177,14 @@ func SetInstall(path string) { // initialTop returns the initial top-level path for the binary // // When nested in top-level/data/elastic-agent-${hash}/ the result is top-level/. -// The agent fexecutable for MacOS is wrappend in the bundle, so the path to the binary is +// The agent executable for MacOS is wrapped in the app bundle, so the path to the binary is // top-level/data/elastic-agent-${hash}/elastic-agent.app/Contents/MacOS func initialTop() string { - exePath := retrieveExecutablePath() - if insideData(exePath) { - exePath = filepath.Dir(filepath.Dir(exePath)) - if runtime.GOOS == darwin { - exePath = filepath.Dir(filepath.Dir(filepath.Dir(exePath))) - } - } - return exePath + return ExecDir(retrieveExecutableDir()) } // retrieveExecutablePath returns the executing binary, even if the started binary was a symlink -func retrieveExecutablePath() string { +func retrieveExecutableDir() string { execPath, err := os.Executable() if err != nil { panic(err) @@ -203,11 +196,37 @@ func retrieveExecutablePath() string { return filepath.Dir(evalPath) } -// insideData returns true when the exePath is inside of the current Agents data path. -func insideData(exePath string) bool { - expectedPath := filepath.Join("data", fmt.Sprintf("elastic-agent-%s", release.ShortCommit())) +// isInsideData returns true when the exePath is inside of the current Agents data path. +func isInsideData(exeDir string) bool { + expectedDir := binaryDir(filepath.Join("data", fmt.Sprintf("elastic-agent-%s", release.ShortCommit()))) + return strings.HasSuffix(exeDir, expectedDir) +} + +// ExecDir returns the "executable" directory which is: +// 1. The same if the execDir is not inside of the data path +// 2. Two levels up if the execDir inside of the data path on non-macOS platforms +// 3. 
Five levels up if the execDir inside of the dataPath on macOS platform +func ExecDir(execDir string) string { + if isInsideData(execDir) { + execDir = filepath.Dir(filepath.Dir(execDir)) + if runtime.GOOS == darwin { + execDir = filepath.Dir(filepath.Dir(filepath.Dir(execDir))) + } + } + return execDir +} + +// binaryDir returns the application binary directory +// For macOS it appends the path inside of the app bundle +// For other platforms it returns the same dir +func binaryDir(baseDir string) string { if runtime.GOOS == darwin { - expectedPath = filepath.Join(expectedPath, "elastic-agent.app", "Contents", "MacOS") + baseDir = filepath.Join(baseDir, "elastic-agent.app", "Contents", "MacOS") } - return strings.HasSuffix(exePath, expectedPath) + return baseDir +} + +// BinaryPath returns the application binary path that is concatenation of the directory and the agentName +func BinaryPath(baseDir, agentName string) string { + return filepath.Join(binaryDir(baseDir), agentName) } diff --git a/internal/pkg/agent/application/paths/common_test.go b/internal/pkg/agent/application/paths/common_test.go new file mode 100644 index 00000000000..a5d76b405be --- /dev/null +++ b/internal/pkg/agent/application/paths/common_test.go @@ -0,0 +1,92 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package paths + +import ( + "fmt" + "path/filepath" + "runtime" + "testing" + + "github.com/elastic/elastic-agent/internal/pkg/release" + "github.com/google/go-cmp/cmp" +) + +func validTestPath() string { + validPath := filepath.Join("data", fmt.Sprintf("elastic-agent-%s", release.ShortCommit())) + if runtime.GOOS == darwin { + validPath = filepath.Join(validPath, "elastic-agent.app", "Contents", "MacOS") + } + return validPath +} + +func TestIsInsideData(t *testing.T) { + tests := []struct { + name string + exePath string + res bool + }{ + { + name: "empty", + }, + { + name: "invalid", + exePath: "data/elastic-agent", + }, + { + name: "valid", + exePath: validTestPath(), + res: true, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + res := isInsideData(tc.exePath) + diff := cmp.Diff(tc.res, res) + if diff != "" { + t.Error(diff) + } + }) + } +} + +func TestExecDir(t *testing.T) { + base := filepath.Join(string(filepath.Separator), "Library", "Elastic", "Agent") + tests := []struct { + name string + execDir string + resDir string + }{ + { + name: "empty", + }, + { + name: "non-data path", + execDir: "data/elastic-agent", + resDir: "data/elastic-agent", + }, + { + name: "valid", + execDir: validTestPath(), + resDir: ".", + }, + { + name: "valid abs", + execDir: filepath.Join(base, validTestPath()), + resDir: base, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + resDir := ExecDir(tc.execDir) + diff := cmp.Diff(tc.resDir, resDir) + if diff != "" { + t.Error(diff) + } + }) + } +} diff --git a/internal/pkg/agent/application/upgrade/service_darwin.go b/internal/pkg/agent/application/upgrade/service_darwin.go index 2bdb435147b..58709dd3e53 100644 --- a/internal/pkg/agent/application/upgrade/service_darwin.go +++ b/internal/pkg/agent/application/upgrade/service_darwin.go @@ -14,7 +14,6 @@ import ( "fmt" "os" "os/exec" - "path/filepath" "regexp" "strconv" "strings" @@ -50,13 +49,13 @@ func (p *darwinPidProvider) Close() {} func (p *darwinPidProvider) 
PID(ctx context.Context) (int, error) { piders := []func(context.Context) (int, error){ - p.piderFromCmd(ctx, "launchctl", "list", paths.ServiceName), + p.piderFromCmd("launchctl", "list", paths.ServiceName), } // if release is specifically built to be upgradeable (using DEV flag) // we dont require to run as a service and will need sudo fallback if release.Upgradeable() { - piders = append(piders, p.piderFromCmd(ctx, "sudo", "launchctl", "list", paths.ServiceName)) + piders = append(piders, p.piderFromCmd("sudo", "launchctl", "list", paths.ServiceName)) } var pidErrors error @@ -72,7 +71,7 @@ func (p *darwinPidProvider) PID(ctx context.Context) (int, error) { return 0, pidErrors } -func (p *darwinPidProvider) piderFromCmd(ctx context.Context, name string, args ...string) func(context.Context) (int, error) { +func (p *darwinPidProvider) piderFromCmd(name string, args ...string) func(context.Context) (int, error) { return func(context.Context) (int, error) { listCmd := exec.Command(name, args...) listCmd.SysProcAttr = &syscall.SysProcAttr{ @@ -115,8 +114,8 @@ func (p *darwinPidProvider) piderFromCmd(ctx context.Context, name string, args } func invokeCmd(topPath string) *exec.Cmd { - homeExePath := filepath.Join(topPath, agentName) - + // paths.BinaryPath properly derives the newPath depending on the platform. The path to the binary for macOS is inside of the app bundle. + homeExePath := paths.BinaryPath(topPath, agentName) cmd := exec.Command(homeExePath, watcherSubcommand, "--path.config", paths.Config(), "--path.home", paths.Top(), diff --git a/internal/pkg/agent/application/upgrade/step_relink.go b/internal/pkg/agent/application/upgrade/step_relink.go index e56b5a6642e..13c49693062 100644 --- a/internal/pkg/agent/application/upgrade/step_relink.go +++ b/internal/pkg/agent/application/upgrade/step_relink.go @@ -17,18 +17,25 @@ import ( "github.com/elastic/elastic-agent/pkg/core/logger" ) +const ( + windows = "windows" + exe = ".exe" +) + // ChangeSymlink updates symlink paths to match current version. func ChangeSymlink(ctx context.Context, log *logger.Logger, targetHash string) error { // create symlink to elastic-agent-{hash} hashedDir := fmt.Sprintf("%s-%s", agentName, targetHash) symlinkPath := filepath.Join(paths.Top(), agentName) - newPath := filepath.Join(paths.Top(), "data", hashedDir, agentName) + + // paths.BinaryPath properly derives the binary directory depending on the platform. The path to the binary for macOS is inside of the app bundle. + newPath := paths.BinaryPath(filepath.Join(paths.Top(), "data", hashedDir), agentName) // handle windows suffixes - if runtime.GOOS == "windows" { - symlinkPath += ".exe" - newPath += ".exe" + if runtime.GOOS == windows { + symlinkPath += exe + newPath += exe } prevNewPath := prevSymlinkPath() @@ -51,7 +58,7 @@ func prevSymlinkPath() string { agentPrevName := agentName + ".prev" // handle windows suffixes - if runtime.GOOS == "windows" { + if runtime.GOOS == windows { agentPrevName = agentName + ".exe.prev" } diff --git a/internal/pkg/agent/application/upgrade/step_unpack.go b/internal/pkg/agent/application/upgrade/step_unpack.go index 4a9538a7e07..45d007e55f4 100644 --- a/internal/pkg/agent/application/upgrade/step_unpack.go +++ b/internal/pkg/agent/application/upgrade/step_unpack.go @@ -8,10 +8,8 @@ import ( "archive/tar" "archive/zip" "compress/gzip" - "context" "fmt" "io" - "io/ioutil" "os" "path/filepath" "runtime" @@ -25,13 +23,13 @@ import ( ) // unpack unpacks archive correctly, skips root (symlink, config...) 
unpacks data/* -func (u *Upgrader) unpack(ctx context.Context, version, archivePath string) (string, error) { +func (u *Upgrader) unpack(version, archivePath string) (string, error) { // unpack must occur in directory that holds the installation directory // or the extraction will be double nested var hash string var err error - if runtime.GOOS == "windows" { - hash, err = unzip(u.log, version, archivePath) + if runtime.GOOS == windows { + hash, err = unzip(u.log, archivePath) } else { hash, err = untar(u.log, version, archivePath) } @@ -45,7 +43,7 @@ func (u *Upgrader) unpack(ctx context.Context, version, archivePath string) (str return hash, nil } -func unzip(log *logger.Logger, version string, archivePath string) (string, error) { +func unzip(log *logger.Logger, archivePath string) (string, error) { var hash, rootDir string r, err := zip.OpenReader(archivePath) if err != nil { @@ -69,7 +67,7 @@ func unzip(log *logger.Logger, version string, archivePath string) (string, erro //get hash fileName := strings.TrimPrefix(f.Name, fileNamePrefix) if fileName == agentCommitFile { - hashBytes, err := ioutil.ReadAll(rc) + hashBytes, err := io.ReadAll(rc) if err != nil || len(hashBytes) < hashLen { return err } @@ -87,10 +85,10 @@ func unzip(log *logger.Logger, version string, archivePath string) (string, erro if f.FileInfo().IsDir() { log.Debugw("Unpacking directory", "archive", "zip", "file.path", path) - os.MkdirAll(path, f.Mode()) + _ = os.MkdirAll(path, f.Mode()) } else { log.Debugw("Unpacking file", "archive", "zip", "file.path", path) - os.MkdirAll(filepath.Dir(path), f.Mode()) + _ = os.MkdirAll(filepath.Dir(path), f.Mode()) f, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, f.Mode()) if err != nil { return err @@ -101,6 +99,7 @@ func unzip(log *logger.Logger, version string, archivePath string) (string, erro } }() + //nolint:gosec // legacy if _, err = io.Copy(f, rc); err != nil { return err } @@ -163,7 +162,7 @@ func untar(log *logger.Logger, version string, archivePath string) (string, erro fileName := strings.TrimPrefix(f.Name, fileNamePrefix) if fileName == agentCommitFile { - hashBytes, err := ioutil.ReadAll(tr) + hashBytes, err := io.ReadAll(tr) if err != nil || len(hashBytes) < hashLen { return "", err } @@ -200,6 +199,7 @@ func untar(log *logger.Logger, version string, archivePath string) (string, erro return "", errors.New(err, "TarInstaller: creating file "+abs, errors.TypeFilesystem, errors.M(errors.MetaKeyPath, abs)) } + //nolint:gosec // legacy _, err = io.Copy(wf, tr) if closeErr := wf.Close(); closeErr != nil && err == nil { err = closeErr diff --git a/internal/pkg/agent/application/upgrade/upgrade.go b/internal/pkg/agent/application/upgrade/upgrade.go index e31c8ef0378..d8c55e17806 100644 --- a/internal/pkg/agent/application/upgrade/upgrade.go +++ b/internal/pkg/agent/application/upgrade/upgrade.go @@ -7,7 +7,6 @@ package upgrade import ( "context" "fmt" - "io/ioutil" "os" "path/filepath" "strings" @@ -151,7 +150,7 @@ func (u *Upgrader) Upgrade(ctx context.Context, a Action, reexecNow bool) (_ ree return nil, err } - newHash, err := u.unpack(ctx, a.Version(), archivePath) + newHash, err := u.unpack(a.Version(), archivePath) if err != nil { return nil, err } @@ -306,7 +305,7 @@ func copyActionStore(log *logger.Logger, newHash string) error { for _, currentActionStorePath := range storePaths { newActionStorePath := filepath.Join(newHome, filepath.Base(currentActionStorePath)) log.Debugw("Copying action store path", "from", currentActionStorePath, "to", 
newActionStorePath)
-		currentActionStore, err := ioutil.ReadFile(currentActionStorePath)
+		currentActionStore, err := os.ReadFile(currentActionStorePath)
 		if os.IsNotExist(err) {
 			// nothing to copy
 			continue
@@ -315,7 +314,7 @@ func copyActionStore(log *logger.Logger, newHash string) error {
 			return err
 		}

-		if err := ioutil.WriteFile(newActionStorePath, currentActionStore, 0600); err != nil {
+		if err := os.WriteFile(newActionStorePath, currentActionStore, 0600); err != nil {
 			return err
 		}
 	}
diff --git a/internal/pkg/agent/install/install.go b/internal/pkg/agent/install/install.go
index a5b02eb015b..431fd1db931 100644
--- a/internal/pkg/agent/install/install.go
+++ b/internal/pkg/agent/install/install.go
@@ -6,14 +6,12 @@ package install

 import (
 	"fmt"
-	"io/ioutil"
 	"os"
 	"path/filepath"
 	"runtime"

 	"github.com/otiai10/copy"

-	"github.com/elastic/elastic-agent/internal/pkg/agent/application/info"
 	"github.com/elastic/elastic-agent/internal/pkg/agent/application/paths"
 	"github.com/elastic/elastic-agent/internal/pkg/agent/errors"
 )
@@ -58,7 +56,10 @@ func Install(cfgFile string) error {

 	// place shell wrapper, if present on platform
 	if paths.ShellWrapperPath != "" {
-		// Install symlink for darwin instead
+		// Install symlink for darwin instead of the wrapper script.
+		// Elastic-agent should be first process that launchd starts in order to be able to grant
+		// the Full-Disk Access (FDA) to the agent and it's child processes.
+		// This is specifically important for osquery FDA permissions at the moment.
 		if runtime.GOOS == darwin {
 			// Check if previous shell wrapper or symlink exists and remove it so it can be overwritten
 			if _, err := os.Lstat(paths.ShellWrapperPath); err == nil {
@@ -80,7 +81,7 @@ func Install(cfgFile string) error {
 			err = os.MkdirAll(filepath.Dir(paths.ShellWrapperPath), 0755)
 			if err == nil {
 				//nolint: gosec // this is intended to be an executable shell script, not chaning the permissions for the linter
-				err = ioutil.WriteFile(paths.ShellWrapperPath, []byte(paths.ShellWrapper), 0755)
+				err = os.WriteFile(paths.ShellWrapperPath, []byte(paths.ShellWrapper), 0755)
 			}
 			if err != nil {
 				return errors.New(
@@ -172,15 +173,7 @@ func findDirectory() (string, error) {
 	if err != nil {
 		return "", err
 	}
-	sourceDir := filepath.Dir(execPath)
-	if info.IsInsideData(sourceDir) {
-		// executable path is being reported as being down inside of data path
-		// move up to directories to perform the copy
-		sourceDir = filepath.Dir(filepath.Dir(sourceDir))
-		if runtime.GOOS == darwin {
-			sourceDir = filepath.Dir(filepath.Dir(filepath.Dir(sourceDir)))
-		}
-	}
+	sourceDir := paths.ExecDir(filepath.Dir(execPath))
 	err = verifyDirectory(sourceDir)
 	if err != nil {
 		return "", err

From 22231384f80fa5c0bd6a31472808fec9c35e6841 Mon Sep 17 00:00:00 2001
From: Tiago Queiroz
Date: Thu, 6 Oct 2022 18:59:15 +0200
Subject: [PATCH 154/180] Fix docker provider add_fields processors (#1420)

The Docker provider was using a wrong key when defining the `add_fields`
processor, this causes Filebeat not to start the input and to stay in an
unhealthy state. This commit fixes it.
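For illustration, a minimal runnable sketch of the corrected mapping shape
(the container ID and image values are hypothetical; the point is that the
add_fields processor takes its destination key as "target", so the
unrecognized key "to" kept the generated input config from being accepted):

    package main

    import "fmt"

    func main() {
        // The destination key must be "target"; the old code emitted "to",
        // which add_fields does not accept, so the input never started.
        processor := map[string]interface{}{
            "add_fields": map[string]interface{}{
                "fields": map[string]interface{}{
                    "id":    "1482c12ef39c", // hypothetical container ID
                    "image": "redis:7",      // hypothetical image name
                },
                "target": "container", // previously the invalid key "to"
            },
        }
        fmt.Println(processor)
    }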
Fixes https://github.com/elastic/beats/issues/29030 --- ...989867-fix-docker-provider-processors.yaml | 31 +++++++++++++++++++ .../pkg/composable/providers/docker/docker.go | 2 +- .../providers/docker/docker_test.go | 2 +- 3 files changed, 33 insertions(+), 2 deletions(-) create mode 100644 changelog/fragments/1664989867-fix-docker-provider-processors.yaml diff --git a/changelog/fragments/1664989867-fix-docker-provider-processors.yaml b/changelog/fragments/1664989867-fix-docker-provider-processors.yaml new file mode 100644 index 00000000000..c7c87152479 --- /dev/null +++ b/changelog/fragments/1664989867-fix-docker-provider-processors.yaml @@ -0,0 +1,31 @@ +# Kind can be one of: +# - breaking-change: a change to previously-documented behavior +# - deprecation: functionality that is being removed in a later release +# - bug-fix: fixes a problem in a previous version +# - enhancement: extends functionality but does not break or fix existing behavior +# - feature: new functionality +# - known-issue: problems that we are aware of in a given version +# - security: impacts on the security of a product or a user’s deployment. +# - upgrade: important information for someone upgrading from a prior version +# - other: does not fit into any of the other categories +kind: bug-fix + +# Change summary; a 80ish characters long description of the change. +summary: Fix docker provider add_fields processors + +# Long description; in case the summary is not enough to describe the change +# this field accommodate a description without length limits. +#description: + +# Affected component; a word indicating the component this changeset affects. +component: providers + +# PR number; optional; the PR number that added the changeset. +# If not present is automatically filled by the tooling finding the PR where this changelog fragment has been added. +# NOTE: the tooling supports backports, so it's able to fill the original PR number instead of the backport PR number. +# Please provide it if you are adding a fragment for a different PR. +#pr: 1234 + +# Issue number; optional; the GitHub issue related to this changeset (either closes or is part of). +# If not present is automatically filled by the tooling with the issue linked to the PR number. 
+#issue: 1234 diff --git a/internal/pkg/composable/providers/docker/docker.go b/internal/pkg/composable/providers/docker/docker.go index b832cbb6c92..f4b4afb9c70 100644 --- a/internal/pkg/composable/providers/docker/docker.go +++ b/internal/pkg/composable/providers/docker/docker.go @@ -149,7 +149,7 @@ func generateData(event bus.Event) (*dockerContainerData, error) { "image": container.Image, "labels": processorLabelMap, }, - "to": "container", + "target": "container", }, }, }, diff --git a/internal/pkg/composable/providers/docker/docker_test.go b/internal/pkg/composable/providers/docker/docker_test.go index d0b5c69ba4d..a035fe06a58 100644 --- a/internal/pkg/composable/providers/docker/docker_test.go +++ b/internal/pkg/composable/providers/docker/docker_test.go @@ -53,7 +53,7 @@ func TestGenerateData(t *testing.T) { "co_elastic_logs/disable": "true", }, }, - "to": "container", + "target": "container", }, }, } From d8c993922550f858845f4fa5b9a8da7d4c502333 Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Fri, 7 Oct 2022 01:36:35 -0400 Subject: [PATCH 155/180] [Automation] Update elastic stack version to 8.6.0-d939cfde for testing (#1436) Co-authored-by: apmmachine --- testing/environments/snapshot.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/testing/environments/snapshot.yml b/testing/environments/snapshot.yml index a4101c6a007..0886e299887 100644 --- a/testing/environments/snapshot.yml +++ b/testing/environments/snapshot.yml @@ -3,7 +3,7 @@ version: '2.3' services: elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:8.6.0-e4c15f15-SNAPSHOT + image: docker.elastic.co/elasticsearch/elasticsearch:8.6.0-d939cfde-SNAPSHOT # When extend is used it merges healthcheck.tests, see: # https://github.com/docker/compose/issues/8962 # healthcheck: @@ -42,7 +42,7 @@ services: - ./docker/logstash/pki:/etc/pki:ro kibana: - image: docker.elastic.co/kibana/kibana:8.6.0-e4c15f15-SNAPSHOT + image: docker.elastic.co/kibana/kibana:8.6.0-d939cfde-SNAPSHOT environment: - "ELASTICSEARCH_USERNAME=kibana_system_user" - "ELASTICSEARCH_PASSWORD=testing" From b0a98e2fac32dea33fd98f58660fff61633363ef Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Mon, 10 Oct 2022 01:38:38 -0400 Subject: [PATCH 156/180] [Automation] Update elastic stack version to 8.6.0-7c9f25a9 for testing (#1446) Co-authored-by: apmmachine --- testing/environments/snapshot.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/testing/environments/snapshot.yml b/testing/environments/snapshot.yml index 0886e299887..df51cdf11c0 100644 --- a/testing/environments/snapshot.yml +++ b/testing/environments/snapshot.yml @@ -3,7 +3,7 @@ version: '2.3' services: elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:8.6.0-d939cfde-SNAPSHOT + image: docker.elastic.co/elasticsearch/elasticsearch:8.6.0-7c9f25a9-SNAPSHOT # When extend is used it merges healthcheck.tests, see: # https://github.com/docker/compose/issues/8962 # healthcheck: @@ -42,7 +42,7 @@ services: - ./docker/logstash/pki:/etc/pki:ro kibana: - image: docker.elastic.co/kibana/kibana:8.6.0-d939cfde-SNAPSHOT + image: docker.elastic.co/kibana/kibana:8.6.0-7c9f25a9-SNAPSHOT environment: - "ELASTICSEARCH_USERNAME=kibana_system_user" - "ELASTICSEARCH_PASSWORD=testing" From a3ea75072acaf6b4ba104377c0a78a6c1b320fcf Mon Sep 17 00:00:00 2001 From: Chris Mark Date: Mon, 10 Oct 2022 12:52:22 +0300 Subject: [PATCH 157/180] 
Enable integration only when datastreams are not defined (#1456) --- .../composable/providers/kubernetes/hints.go | 7 +- .../providers/kubernetes/hints_test.go | 73 ++++++++++++++++++- 2 files changed, 74 insertions(+), 6 deletions(-) diff --git a/internal/pkg/composable/providers/kubernetes/hints.go b/internal/pkg/composable/providers/kubernetes/hints.go index 1a779e0c2c6..5499d1408cb 100644 --- a/internal/pkg/composable/providers/kubernetes/hints.go +++ b/internal/pkg/composable/providers/kubernetes/hints.go @@ -144,9 +144,7 @@ func GenerateHintsMapping(hints mapstr.M, kubeMeta mapstr.M, logger *logp.Logger if integration == "" { return hintsMapping } - integrationHints := mapstr.M{ - "enabled": true, - } + integrationHints := mapstr.M{} if containerID != "" { _, _ = hintsMapping.Put("container_id", containerID) @@ -194,6 +192,9 @@ func GenerateHintsMapping(hints mapstr.M, kubeMeta mapstr.M, logger *logp.Logger } dataStreams := builder.getDataStreams(hints) + if len(dataStreams) == 0 { + _, _ = integrationHints.Put("enabled", true) + } for _, dataStream := range dataStreams { streamHints := mapstr.M{ "enabled": true, diff --git a/internal/pkg/composable/providers/kubernetes/hints_test.go b/internal/pkg/composable/providers/kubernetes/hints_test.go index e23296d09a7..04c25575f26 100644 --- a/internal/pkg/composable/providers/kubernetes/hints_test.go +++ b/internal/pkg/composable/providers/kubernetes/hints_test.go @@ -78,7 +78,6 @@ func TestGenerateHintsMapping(t *testing.T) { expected := mapstr.M{ "redis": mapstr.M{ - "enabled": true, "host": "127.0.0.5:6379", "metrics_path": "/metrics", "username": "username", @@ -118,6 +117,76 @@ func TestGenerateHintsMapping(t *testing.T) { assert.Equal(t, expected, hintsMapping) } +func TestGenerateHintsMappingWithDefaults(t *testing.T) { + logger := getLogger() + pod := &kubernetes.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testpod", + UID: types.UID(uid), + Namespace: "testns", + Labels: map[string]string{ + "foo": "bar", + "with-dash": "dash-value", + "with/slash": "some/path", + }, + Annotations: map[string]string{ + "app": "production", + }, + }, + TypeMeta: metav1.TypeMeta{ + Kind: "Pod", + APIVersion: "v1", + }, + Spec: kubernetes.PodSpec{ + NodeName: "testnode", + }, + Status: kubernetes.PodStatus{PodIP: "127.0.0.5"}, + } + + mapping := map[string]interface{}{ + "namespace": pod.GetNamespace(), + "pod": mapstr.M{ + "uid": string(pod.GetUID()), + "name": pod.GetName(), + "ip": pod.Status.PodIP, + }, + "namespace_annotations": mapstr.M{ + "nsa": "nsb", + }, + "labels": mapstr.M{ + "foo": "bar", + "with-dash": "dash-value", + "with/slash": "some/path", + }, + "annotations": mapstr.M{ + "app": "production", + }, + } + hints := mapstr.M{ + "hints": mapstr.M{ + "host": "${kubernetes.pod.ip}:6379", + "package": "redis", + "metrics_path": "/metrics", + "timeout": "42s", + "period": "42s", + }, + } + + expected := mapstr.M{ + "redis": mapstr.M{ + "enabled": true, + "host": "127.0.0.5:6379", + "metrics_path": "/metrics", + "timeout": "42s", + "period": "42s", + }, + } + + hintsMapping := GenerateHintsMapping(hints, mapping, logger, "") + + assert.Equal(t, expected, hintsMapping) +} + func TestGenerateHintsMappingWithContainerID(t *testing.T) { logger := getLogger() pod := &kubernetes.Pod{ @@ -184,7 +253,6 @@ func TestGenerateHintsMappingWithContainerID(t *testing.T) { "container_logs": mapstr.M{ "enabled": true, }, - "enabled": true, "host": "127.0.0.5:6379", "metrics_path": "/metrics", "username": "username", @@ -281,7 +349,6 @@ func 
TestGenerateHintsMappingWithLogStream(t *testing.T) { expected := mapstr.M{ "container_id": "asdfghjkl", "apache": mapstr.M{ - "enabled": true, "container_logs": mapstr.M{ "enabled": true, }, From f772a3deab4bab4894e37f11ad731b1be3ea93aa Mon Sep 17 00:00:00 2001 From: Michael Katsoulis Date: Mon, 10 Oct 2022 15:15:40 +0300 Subject: [PATCH 158/180] Add not dedoted k8s pod labels in autodiscover provider to be used for templating, exactly like annotations (#1398) --- CHANGELOG.next.asciidoc | 1 + .../pkg/composable/providers/kubernetes/pod.go | 15 ++++++++++++++- 2 files changed, 15 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 2ce614336a4..f178d80c735 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -120,6 +120,7 @@ - Fix unintended reset of source URI when downloading components {pull}1252[1252] - Create separate status reporter for local only events so that degraded fleet-checkins no longer affect health on successful fleet-checkins. {issue}1157[1157] {pull}1285[1285] - Add success log message after previous checkin failures {pull}1327[1327] +- Fix inconsistency between kubernetes pod annotations and labels in autodiscovery templates {pull}1327[1327] ==== New features diff --git a/internal/pkg/composable/providers/kubernetes/pod.go b/internal/pkg/composable/providers/kubernetes/pod.go index d4553dda6d3..27c9b53bec2 100644 --- a/internal/pkg/composable/providers/kubernetes/pod.go +++ b/internal/pkg/composable/providers/kubernetes/pod.go @@ -267,6 +267,12 @@ func generatePodData( _ = safemapstr.Put(annotations, k, v) } k8sMapping["annotations"] = annotations + // Pass labels(not dedoted) to all events so that they can be used in templating. + labels := mapstr.M{} + for k, v := range pod.GetObjectMeta().GetLabels() { + _ = safemapstr.Put(labels, k, v) + } + k8sMapping["labels"] = labels processors := []map[string]interface{}{} // meta map includes metadata that go under kubernetes.* @@ -305,6 +311,12 @@ func generateContainerData( _ = safemapstr.Put(annotations, k, v) } + // Pass labels to all events so that it can be used in templating. 
+ labels := mapstr.M{} + for k, v := range pod.GetObjectMeta().GetLabels() { + _ = safemapstr.Put(labels, k, v) + } + for _, c := range containers { // If it doesn't have an ID, container doesn't exist in // the runtime, emit only an event if we are stopping, so @@ -329,8 +341,9 @@ func generateContainerData( if len(namespaceAnnotations) != 0 { k8sMapping["namespace_annotations"] = namespaceAnnotations } - // add annotations to be discoverable by templates + // add annotations and labels to be discoverable by templates k8sMapping["annotations"] = annotations + k8sMapping["labels"] = labels //container ECS fields cmeta := mapstr.M{ From 0efbca645122704ed2698e6216a75b2e7da10638 Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Tue, 11 Oct 2022 01:41:08 -0400 Subject: [PATCH 159/180] [Automation] Update elastic stack version to 8.6.0-c49fac70 for testing (#1464) Co-authored-by: apmmachine --- testing/environments/snapshot.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/testing/environments/snapshot.yml b/testing/environments/snapshot.yml index df51cdf11c0..66813f6f057 100644 --- a/testing/environments/snapshot.yml +++ b/testing/environments/snapshot.yml @@ -3,7 +3,7 @@ version: '2.3' services: elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:8.6.0-7c9f25a9-SNAPSHOT + image: docker.elastic.co/elasticsearch/elasticsearch:8.6.0-c49fac70-SNAPSHOT # When extend is used it merges healthcheck.tests, see: # https://github.com/docker/compose/issues/8962 # healthcheck: @@ -42,7 +42,7 @@ services: - ./docker/logstash/pki:/etc/pki:ro kibana: - image: docker.elastic.co/kibana/kibana:8.6.0-7c9f25a9-SNAPSHOT + image: docker.elastic.co/kibana/kibana:8.6.0-c49fac70-SNAPSHOT environment: - "ELASTICSEARCH_USERNAME=kibana_system_user" - "ELASTICSEARCH_PASSWORD=testing" From 7f813fa99607a76da365e04cfedf8a5e424c2e74 Mon Sep 17 00:00:00 2001 From: Michael Katsoulis Date: Tue, 11 Oct 2022 13:58:52 +0300 Subject: [PATCH 160/180] Add storageclass permissions in agent clusterrole (#1470) * Add storageclass permissions in agent clusterrole --- CHANGELOG.next.asciidoc | 2 +- deploy/kubernetes/elastic-agent-managed-kubernetes.yaml | 4 ++++ .../elastic-agent-managed/elastic-agent-managed-role.yaml | 4 ++++ 3 files changed, 9 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index f178d80c735..f8033809cb4 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -121,7 +121,7 @@ - Create separate status reporter for local only events so that degraded fleet-checkins no longer affect health on successful fleet-checkins. 
{issue}1157[1157] {pull}1285[1285] - Add success log message after previous checkin failures {pull}1327[1327] - Fix inconsistency between kubernetes pod annotations and labels in autodiscovery templates {pull}1327[1327] - +- Add permissions to elastic-agent-managed clusterrole to get, list, watch storageclasses {pull}1470[1470] ==== New features - Prepare packaging for endpoint and asc files {pull-beats}[20186] diff --git a/deploy/kubernetes/elastic-agent-managed-kubernetes.yaml b/deploy/kubernetes/elastic-agent-managed-kubernetes.yaml index 0f7bf79f107..1f3c3d8ec9b 100644 --- a/deploy/kubernetes/elastic-agent-managed-kubernetes.yaml +++ b/deploy/kubernetes/elastic-agent-managed-kubernetes.yaml @@ -227,6 +227,10 @@ rules: resources: - podsecuritypolicies verbs: ["get", "list", "watch"] + - apiGroups: [ "storage.k8s.io" ] + resources: + - storageclasses + verbs: [ "get", "list", "watch" ] --- apiVersion: rbac.authorization.k8s.io/v1 kind: Role diff --git a/deploy/kubernetes/elastic-agent-managed/elastic-agent-managed-role.yaml b/deploy/kubernetes/elastic-agent-managed/elastic-agent-managed-role.yaml index 0d961215f4e..778a4ba5520 100644 --- a/deploy/kubernetes/elastic-agent-managed/elastic-agent-managed-role.yaml +++ b/deploy/kubernetes/elastic-agent-managed/elastic-agent-managed-role.yaml @@ -63,6 +63,10 @@ rules: resources: - podsecuritypolicies verbs: ["get", "list", "watch"] + - apiGroups: [ "storage.k8s.io" ] + resources: + - storageclasses + verbs: [ "get", "list", "watch" ] --- apiVersion: rbac.authorization.k8s.io/v1 kind: Role From 6223273d2e18b50ab9f29b99091b7540da476860 Mon Sep 17 00:00:00 2001 From: Julien Lind Date: Tue, 11 Oct 2022 14:02:07 +0200 Subject: [PATCH 161/180] Remote QA-labels automation (#1455) --- .github/workflows/qa-labels.yml | 93 --------------------------------- 1 file changed, 93 deletions(-) delete mode 100644 .github/workflows/qa-labels.yml diff --git a/.github/workflows/qa-labels.yml b/.github/workflows/qa-labels.yml deleted file mode 100644 index bbbd4439847..00000000000 --- a/.github/workflows/qa-labels.yml +++ /dev/null @@ -1,93 +0,0 @@ -name: Add QA labels to Elastic Agent issues -on: - # pull_request_target allows running actions on PRs from forks with a read/write GITHUB_TOKEN, but it will not allow - # running workflows defined in the PRs itself, only workflows already merged into the target branch. This avoids - # potential vulnerabilities that could allow someone to open a PR and retrieve secrets. - # It's important that this workflow never runs any checkout actions which could be used to circumvent this protection. 
- # See these links for more information: - # - https://github.blog/2020-08-03-github-actions-improvements-for-fork-and-pull-request-workflows/ - # - https://nathandavison.com/blog/github-actions-and-the-threat-of-malicious-pull-requests - pull_request_target: - types: - - closed - -jobs: - fetch_issues_to_label: - runs-on: ubuntu-latest - # Only run on PRs that were merged for the Elastic Agent teams - if: | - github.event.pull_request.merged_at && - ( - contains(github.event.pull_request.labels.*.name, 'Team:Elastic-Agent') || - contains(github.event.pull_request.labels.*.name, 'Team:Elastic-Agent-Data-Plane') || - contains(github.event.pull_request.labels.*.name, 'Team:Elastic-Agent-Control-Plane') - ) - outputs: - issue_ids: ${{ steps.issues_to_label.outputs.value }} - label_ids: ${{ steps.label_ids.outputs.value }} - steps: - - uses: octokit/graphql-action@v2.x - id: closing_issues - with: - query: | - query closingIssueNumbersQuery($prnumber: Int!) { - repository(owner: "elastic", name: "elastic-agent") { - pullRequest(number: $prnumber) { - closingIssuesReferences(first: 10) { - nodes { - id - labels(first: 20) { - nodes { - id - name - } - } - } - } - } - } - } - prnumber: ${{ github.event.number }} - token: ${{ secrets.GITHUB_TOKEN }} - - uses: sergeysova/jq-action@v2 - id: issues_to_label - with: - # Map to the issues' node id - cmd: echo $CLOSING_ISSUES | jq -c '.repository.pullRequest.closingIssuesReferences.nodes | map(.id)' - multiline: true - env: - CLOSING_ISSUES: ${{ steps.closing_issues.outputs.data }} - - uses: sergeysova/jq-action@v2 - id: label_ids - with: - # Get list of version labels on pull request and map to label's node id, append 'QA:Ready For Testing' id ("LA_kwDOGgEmJc7mkkl9]") - cmd: echo $PR_LABELS | jq -c 'map(select(.name | test("v[0-9]+\\.[0-9]+\\.[0-9]+")) | .node_id) + ["LA_kwDOGgEmJc7mkkl9]' - multiline: true - env: - PR_LABELS: ${{ toJSON(github.event.pull_request.labels) }} - - label_issues: - needs: fetch_issues_to_label - runs-on: ubuntu-latest - # For each issue closed by the PR run this job - if: | - fromJSON(needs.fetch_issues_to_label.outputs.issue_ids).length > 0 && - fromJSON(needs.fetch_issues_to_label.outputs.label_ids).length > 0 - strategy: - matrix: - issueNodeId: ${{ fromJSON(needs.fetch_issues_to_label.outputs.issue_ids) }} - labelId: ${{ fromJSON(needs.fetch_issues_to_label.outputs.label_ids) }} - name: Label issue ${{ matrix.issueNodeId }} - steps: - - uses: octokit/graphql-action@v2.x - id: add_labels_to_closed_issue - with: - query: | - mutation add_label($issueid:ID!, $labelids:[String!]!) 
{ - addLabelsToLabelable(input: {labelableId: $issueid, labelIds: $labelids}) { - clientMutationId - } - } - issueid: ${{ matrix.issueNodeId }} - labelids: ${{ matrix.labelId }} - token: ${{ secrets.GITHUB_TOKEN }} From 6c325d09e7ea1e9b67bd5347d2d6c94fa0e7d3fb Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Tue, 11 Oct 2022 10:03:54 -0400 Subject: [PATCH 162/180] [Automation] Update go release version to 1.18.7 (#1444) Co-authored-by: apmmachine --- .go-version | 2 +- Dockerfile | 2 +- version/docs/version.asciidoc | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.go-version b/.go-version index 04a8bc26d16..d6f3a382b34 100644 --- a/.go-version +++ b/.go-version @@ -1 +1 @@ -1.18.6 +1.18.7 diff --git a/Dockerfile b/Dockerfile index 78bc8928198..fd56ef5e2ff 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -ARG GO_VERSION=1.18.6 +ARG GO_VERSION=1.18.7 FROM circleci/golang:${GO_VERSION} diff --git a/version/docs/version.asciidoc b/version/docs/version.asciidoc index db48ba622f8..0485d65c441 100644 --- a/version/docs/version.asciidoc +++ b/version/docs/version.asciidoc @@ -1,6 +1,6 @@ :stack-version: 8.3.0 :doc-branch: main -:go-version: 1.18.6 +:go-version: 1.18.7 :release-state: unreleased :python: 3.7 :docker: 1.12 From 070af5fcaee916edd0a11a3cf52a00e7f5733499 Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Wed, 12 Oct 2022 01:37:52 -0400 Subject: [PATCH 163/180] [Automation] Update elastic stack version to 8.6.0-5a8d757d for testing (#1480) Co-authored-by: apmmachine --- testing/environments/snapshot.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/testing/environments/snapshot.yml b/testing/environments/snapshot.yml index 66813f6f057..976d846eb52 100644 --- a/testing/environments/snapshot.yml +++ b/testing/environments/snapshot.yml @@ -3,7 +3,7 @@ version: '2.3' services: elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:8.6.0-c49fac70-SNAPSHOT + image: docker.elastic.co/elasticsearch/elasticsearch:8.6.0-5a8d757d-SNAPSHOT # When extend is used it merges healthcheck.tests, see: # https://github.com/docker/compose/issues/8962 # healthcheck: @@ -42,7 +42,7 @@ services: - ./docker/logstash/pki:/etc/pki:ro kibana: - image: docker.elastic.co/kibana/kibana:8.6.0-c49fac70-SNAPSHOT + image: docker.elastic.co/kibana/kibana:8.6.0-5a8d757d-SNAPSHOT environment: - "ELASTICSEARCH_USERNAME=kibana_system_user" - "ELASTICSEARCH_PASSWORD=testing" From cd6ad3d121d97bbfc09444f13bf0622f2e6425f1 Mon Sep 17 00:00:00 2001 From: Craig MacKenzie Date: Wed, 12 Oct 2022 16:45:28 -0400 Subject: [PATCH 164/180] Improve logging around agent checkins. (#1477) Improve logging around agent checkins. - Log transient checkin errors at Info. - Upgrade to an Error log after 2 repeated failures. - Log the wait time for the next retry. - Only update local state after repeated failures. 
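As a rough, self-contained sketch of the escalation rule described above (the
two-failure threshold mirrors the gateway change below; the one-second initial
and thirty-second maximum backoff values are assumptions for the example):

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        wait, max := time.Second, 30*time.Second // assumed backoff bounds
        for failures := 1; failures <= 4; failures++ {
            if failures <= 2 {
                // The first couple of failures may be transient: warn and retry.
                fmt.Printf("WARN checkin failed (attempt %d), retrying in %s\n", failures, wait)
            } else {
                // Repeated failures: report local state as degraded and log an error.
                fmt.Printf("ERROR checkin failed (attempt %d), state degraded, retrying in %s\n", failures, wait)
            }
            // Exponential backoff capped at a maximum, mirroring ExpBackoff.NextWait below.
            wait *= 2
            if wait > max {
                wait = max
            }
        }
    }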
--- CHANGELOG.next.asciidoc | 1 + ...5517984-improve-checkin-error-logging.yaml | 5 ++ .../gateway/fleet/fleet_gateway.go | 39 +++++++++------ .../gateway/fleet/fleet_gateway_test.go | 20 ++++++-- internal/pkg/core/backoff/backoff.go | 5 ++ internal/pkg/core/backoff/backoff_test.go | 50 +++++++++++++++---- internal/pkg/core/backoff/equal_jitter.go | 17 +++++-- internal/pkg/core/backoff/exponential.go | 17 ++++--- internal/pkg/fleetapi/checkin_cmd.go | 23 +++++---- internal/pkg/fleetapi/checkin_cmd_test.go | 20 +++++--- 10 files changed, 140 insertions(+), 57 deletions(-) create mode 100644 changelog/fragments/1665517984-improve-checkin-error-logging.yaml diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index f8033809cb4..e937813e86d 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -210,3 +210,4 @@ - Add support for hints' based autodiscovery in kubernetes provider. {pull}[698] - Improve logging during upgrades. {pull}[1287] - Added status message to CheckinRequest {pull}[1369] +- Improve logging of Fleet checkins errors. {pull}[1477] diff --git a/changelog/fragments/1665517984-improve-checkin-error-logging.yaml b/changelog/fragments/1665517984-improve-checkin-error-logging.yaml new file mode 100644 index 00000000000..7bf2777d9d5 --- /dev/null +++ b/changelog/fragments/1665517984-improve-checkin-error-logging.yaml @@ -0,0 +1,5 @@ +kind: enhancement +summary: Improve logging of Fleet check-in errors. +description: Improve logging of Fleet check-in errors and only report the local state as degraded after two consecutive failed check-ins. +pr: 1477 +issue: 1154 diff --git a/internal/pkg/agent/application/gateway/fleet/fleet_gateway.go b/internal/pkg/agent/application/gateway/fleet/fleet_gateway.go index 897b81ea0d3..9ebebcf2c0f 100644 --- a/internal/pkg/agent/application/gateway/fleet/fleet_gateway.go +++ b/internal/pkg/agent/application/gateway/fleet/fleet_gateway.go @@ -173,7 +173,7 @@ func (f *fleetGateway) worker() { // Execute the checkin call and for any errors returned by the fleet-server API // the function will retry to communicate with fleet-server with an exponential delay and some // jitter to help better distribute the load from a fleet of agents. - resp, err := f.doExecute() + resp, err := f.executeCheckinWithRetries() if err != nil { continue } @@ -274,21 +274,34 @@ func (f *fleetGateway) gatherQueuedActions(ts time.Time) (queued, expired []flee return queued, expired } -func (f *fleetGateway) doExecute() (*fleetapi.CheckinResponse, error) { +func (f *fleetGateway) executeCheckinWithRetries() (*fleetapi.CheckinResponse, error) { f.backoff.Reset() // Guard if the context is stopped by a out of bound call, // this mean we are rebooting to change the log level or the system is shutting us down. for f.bgContext.Err() == nil { f.log.Debugf("Checkin started") - resp, err := f.execute(f.bgContext) + resp, took, err := f.executeCheckin(f.bgContext) if err != nil { f.checkinFailCounter++ - f.log.Errorf("Could not communicate with fleet-server checkin API will retry, error: %s", err) + + // Report the first two failures at warn level as they may be recoverable with retries. 
+ if f.checkinFailCounter <= 2 { + f.log.Warnw("Possible transient error during checkin with fleet-server, retrying", + "error.message", err, "request_duration_ns", took, "failed_checkins", f.checkinFailCounter, + "retry_after_ns", f.backoff.NextWait()) + } else { + // Only update the local status after repeated failures: https://github.com/elastic/elastic-agent/issues/1148 + f.localReporter.Update(state.Degraded, fmt.Sprintf("checkin failed: %v", err), nil) + f.log.Errorw("Cannot checkin in with fleet-server, retrying", + "error.message", err, "request_duration_ns", took, "failed_checkins", f.checkinFailCounter, + "retry_after_ns", f.backoff.NextWait()) + } + if !f.backoff.Wait() { // Something bad has happened and we log it and we should update our current state. err := errors.New( - "execute retry loop was stopped", + "checkin retry loop was stopped", errors.TypeNetwork, errors.M(errors.MetaKeyURI, f.client.URI()), ) @@ -297,10 +310,6 @@ func (f *fleetGateway) doExecute() (*fleetapi.CheckinResponse, error) { f.localReporter.Update(state.Failed, err.Error(), nil) return nil, err } - if f.checkinFailCounter > 1 { - f.localReporter.Update(state.Degraded, fmt.Sprintf("checkin failed: %v", err), nil) - f.log.Errorf("checkin number %d failed: %s", f.checkinFailCounter, err.Error()) - } continue } @@ -319,7 +328,7 @@ func (f *fleetGateway) doExecute() (*fleetapi.CheckinResponse, error) { return nil, f.bgContext.Err() } -func (f *fleetGateway) execute(ctx context.Context) (*fleetapi.CheckinResponse, error) { +func (f *fleetGateway) executeCheckin(ctx context.Context) (*fleetapi.CheckinResponse, time.Duration, error) { ecsMeta, err := info.Metadata() if err != nil { f.log.Error(errors.New("failed to load metadata", err)) @@ -340,7 +349,7 @@ func (f *fleetGateway) execute(ctx context.Context) (*fleetapi.CheckinResponse, Message: f.statusController.Status().Message, } - resp, err := cmd.Execute(ctx, req) + resp, took, err := cmd.Execute(ctx, req) if isUnauth(err) { f.unauthCounter++ @@ -348,15 +357,15 @@ func (f *fleetGateway) execute(ctx context.Context) (*fleetapi.CheckinResponse, f.log.Warnf("received an invalid api key error '%d' times. 
Starting to unenroll the elastic agent.", f.unauthCounter) return &fleetapi.CheckinResponse{ Actions: []fleetapi.Action{&fleetapi.ActionUnenroll{ActionID: "", ActionType: "UNENROLL", IsDetected: true}}, - }, nil + }, took, nil } - return nil, err + return nil, took, err } f.unauthCounter = 0 if err != nil { - return nil, err + return nil, took, err } // Save the latest ackToken @@ -368,7 +377,7 @@ func (f *fleetGateway) execute(ctx context.Context) (*fleetapi.CheckinResponse, } } - return resp, nil + return resp, took, nil } // shouldUnenroll checks if the max number of trying an invalid key is reached diff --git a/internal/pkg/agent/application/gateway/fleet/fleet_gateway_test.go b/internal/pkg/agent/application/gateway/fleet/fleet_gateway_test.go index 0cc00e739a8..1860782a1e7 100644 --- a/internal/pkg/agent/application/gateway/fleet/fleet_gateway_test.go +++ b/internal/pkg/agent/application/gateway/fleet/fleet_gateway_test.go @@ -21,6 +21,7 @@ import ( "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" + "github.com/elastic/elastic-agent-libs/logp" "github.com/elastic/elastic-agent/internal/pkg/agent/application/gateway" "github.com/elastic/elastic-agent/internal/pkg/agent/application/paths" "github.com/elastic/elastic-agent/internal/pkg/agent/storage" @@ -693,7 +694,7 @@ func TestRetriesOnFailures(t *testing.T) { scheduler := scheduler.NewStepper() client := newTestingClient() dispatcher := newTestingDispatcher() - log, _ := logger.New("fleet_gateway", false) + log := newInfoLogger(t, "fleet_gateway") ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -706,8 +707,8 @@ func TestRetriesOnFailures(t *testing.T) { queue.On("Actions").Return([]fleetapi.Action{}) localReporter := &testutils.MockReporter{} - localReporter.On("Update", state.Degraded, mock.Anything, mock.Anything).Times(2) - localReporter.On("Update", mock.Anything, mock.Anything, mock.Anything).Maybe() + // The local state should only be reported as degraded after two consecutive failures. + localReporter.On("Update", state.Degraded, mock.Anything, mock.Anything).Once() localReporter.On("Unregister").Maybe() fleetReporter := &testutils.MockReporter{} @@ -814,3 +815,16 @@ type testAgentInfo struct{} func (testAgentInfo) AgentID() string { return "agent-secret" } type request struct{} + +func newInfoLogger(t *testing.T, name string) *logger.Logger { + t.Helper() + + loggerCfg := logger.DefaultLoggingConfig() + loggerCfg.Level = logp.InfoLevel + loggerCfg.ToFiles = false + loggerCfg.ToStderr = true + + log, err := logger.NewFromConfig("", loggerCfg, false) + require.NoError(t, err) + return log +} diff --git a/internal/pkg/core/backoff/backoff.go b/internal/pkg/core/backoff/backoff.go index 06723e7db9a..c97eaae199d 100644 --- a/internal/pkg/core/backoff/backoff.go +++ b/internal/pkg/core/backoff/backoff.go @@ -4,11 +4,16 @@ package backoff +import "time" + // Backoff defines the interface for backoff strategies. type Backoff interface { // Wait blocks for a duration of time governed by the backoff strategy. Wait() bool + // NextWait returns the duration of the next call to Wait(). + NextWait() time.Duration + // Reset resets the backoff duration to an initial value governed by the backoff strategy. 
Reset() } diff --git a/internal/pkg/core/backoff/backoff_test.go b/internal/pkg/core/backoff/backoff_test.go index 88498ff5a58..12332eb15f2 100644 --- a/internal/pkg/core/backoff/backoff_test.go +++ b/internal/pkg/core/backoff/backoff_test.go @@ -14,14 +14,9 @@ import ( type factory func(<-chan struct{}) Backoff -func TestBackoff(t *testing.T) { - t.Run("test close channel", testCloseChannel) - t.Run("test unblock after some time", testUnblockAfterInit) -} - -func testCloseChannel(t *testing.T) { - init := 2 * time.Second - max := 5 * time.Minute +func TestCloseChannel(t *testing.T) { + init := 2 * time.Millisecond + max := 5 * time.Second tests := map[string]factory{ "ExpBackoff": func(done <-chan struct{}) Backoff { @@ -42,9 +37,9 @@ func testCloseChannel(t *testing.T) { } } -func testUnblockAfterInit(t *testing.T) { - init := 1 * time.Second - max := 5 * time.Minute +func TestUnblockAfterInit(t *testing.T) { + init := 1 * time.Millisecond + max := 5 * time.Second tests := map[string]factory{ "ExpBackoff": func(done <-chan struct{}) Backoff { @@ -68,3 +63,36 @@ func testUnblockAfterInit(t *testing.T) { }) } } + +func TestNextWait(t *testing.T) { + init := time.Millisecond + max := 5 * time.Second + + tests := map[string]factory{ + "ExpBackoff": func(done <-chan struct{}) Backoff { + return NewExpBackoff(done, init, max) + }, + "EqualJitterBackoff": func(done <-chan struct{}) Backoff { + return NewEqualJitterBackoff(done, init, max) + }, + } + + for name, f := range tests { + t.Run(name, func(t *testing.T) { + c := make(chan struct{}) + b := f(c) + + startWait := b.NextWait() + assert.Equal(t, startWait, b.NextWait(), "next wait not stable") + + startedAt := time.Now() + b.Wait() + waitDuration := time.Now().Sub(startedAt) + nextWait := b.NextWait() + + t.Logf("actualWait: %s startWait: %s nextWait: %s", waitDuration, startWait, nextWait) + assert.Less(t, startWait, nextWait, "wait value did not increase") + assert.GreaterOrEqual(t, waitDuration, startWait, "next wait duration <= actual wait duration") + }) + } +} diff --git a/internal/pkg/core/backoff/equal_jitter.go b/internal/pkg/core/backoff/equal_jitter.go index d87077397cd..671201f5892 100644 --- a/internal/pkg/core/backoff/equal_jitter.go +++ b/internal/pkg/core/backoff/equal_jitter.go @@ -16,8 +16,9 @@ type EqualJitterBackoff struct { duration time.Duration done <-chan struct{} - init time.Duration - max time.Duration + init time.Duration + max time.Duration + nextRand time.Duration last time.Time } @@ -29,6 +30,7 @@ func NewEqualJitterBackoff(done <-chan struct{}, init, max time.Duration) Backof done: done, init: init, max: max, + nextRand: time.Duration(rand.Int63n(int64(init))), //nolint:gosec } } @@ -38,13 +40,18 @@ func (b *EqualJitterBackoff) Reset() { b.duration = b.init * 2 } +func (b *EqualJitterBackoff) NextWait() time.Duration { + // Make sure we have always some minimal back off and jitter. + temp := b.duration / 2 + return temp + b.nextRand +} + // Wait block until either the timer is completed or channel is done. func (b *EqualJitterBackoff) Wait() bool { - // Make sure we have always some minimal back off and jitter. - temp := int64(b.duration / 2) - backoff := time.Duration(temp + rand.Int63n(temp)) + backoff := b.NextWait() // increase duration for next wait. 
+ b.nextRand = time.Duration(rand.Int63n(int64(b.duration))) b.duration *= 2 if b.duration > b.max { b.duration = b.max diff --git a/internal/pkg/core/backoff/exponential.go b/internal/pkg/core/backoff/exponential.go index 81224b95eb5..51b5b4e0cb5 100644 --- a/internal/pkg/core/backoff/exponential.go +++ b/internal/pkg/core/backoff/exponential.go @@ -36,18 +36,23 @@ func (b *ExpBackoff) Reset() { b.duration = b.init } +func (b *ExpBackoff) NextWait() time.Duration { + nextWait := b.duration + nextWait *= 2 + if nextWait > b.max { + nextWait = b.max + } + return nextWait +} + // Wait block until either the timer is completed or channel is done. func (b *ExpBackoff) Wait() bool { - backoff := b.duration - b.duration *= 2 - if b.duration > b.max { - b.duration = b.max - } + b.duration = b.NextWait() select { case <-b.done: return false - case <-time.After(backoff): + case <-time.After(b.duration): b.last = time.Now() return true } diff --git a/internal/pkg/fleetapi/checkin_cmd.go b/internal/pkg/fleetapi/checkin_cmd.go index d6a63a45e29..f79c6bab8bc 100644 --- a/internal/pkg/fleetapi/checkin_cmd.go +++ b/internal/pkg/fleetapi/checkin_cmd.go @@ -78,23 +78,26 @@ func NewCheckinCmd(info agentInfo, client client.Sender) *CheckinCmd { } } -// Execute enroll the Agent in the Fleet Server. -func (e *CheckinCmd) Execute(ctx context.Context, r *CheckinRequest) (*CheckinResponse, error) { +// Execute performs a check-in with the Fleet Server. It returns the decoded check-in response, a duration indicating +// how long the request took, and an error. +func (e *CheckinCmd) Execute(ctx context.Context, r *CheckinRequest) (*CheckinResponse, time.Duration, error) { if err := r.Validate(); err != nil { - return nil, err + return nil, 0, err } b, err := json.Marshal(r) if err != nil { - return nil, errors.New(err, + return nil, 0, errors.New(err, "fail to encode the checkin request", errors.TypeUnexpected) } cp := fmt.Sprintf(checkingPath, e.info.AgentID()) + sendStart := time.Now() resp, err := e.client.Send(ctx, "POST", cp, nil, nil, bytes.NewBuffer(b)) + sendDuration := time.Since(sendStart) if err != nil { - return nil, errors.New(err, + return nil, sendDuration, errors.New(err, "fail to checkin to fleet-server", errors.TypeNetwork, errors.M(errors.MetaKeyURI, cp)) @@ -102,26 +105,26 @@ func (e *CheckinCmd) Execute(ctx context.Context, r *CheckinRe defer resp.Body.Close() if resp.StatusCode != http.StatusOK { - return nil, client.ExtractError(resp.Body) + return nil, sendDuration, client.ExtractError(resp.Body) } rs, err := ioutil.ReadAll(resp.Body) if err != nil { - return nil, errors.New(err, "failed to read checkin response") + return nil, sendDuration, errors.New(err, "failed to read checkin response") } checkinResponse := &CheckinResponse{} decoder := json.NewDecoder(bytes.NewReader(rs)) if err := decoder.Decode(checkinResponse); err != nil { - return nil, errors.New(err, + return nil, sendDuration, errors.New(err, "fail to decode checkin response", errors.TypeNetwork, errors.M(errors.MetaKeyURI, cp)) } if err := checkinResponse.Validate(); err != nil { - return nil, err + return nil, sendDuration, err } - return checkinResponse, nil + return checkinResponse, sendDuration, nil } diff --git a/internal/pkg/fleetapi/checkin_cmd_test.go b/internal/pkg/fleetapi/checkin_cmd_test.go index 2d9aef2741a..56726bb5559 100644 --- a/internal/pkg/fleetapi/checkin_cmd_test.go +++ b/internal/pkg/fleetapi/checkin_cmd_test.go @@ -11,6 +11,7 @@ import ( "io/ioutil" "net/http" "testing" + "time"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -25,6 +26,7 @@ func (*agentinfo) AgentID() string { return "id" } func TestCheckin(t *testing.T) { const withAPIKey = "secret" + const requestDelay = time.Millisecond ctx := context.Background() agentInfo := &agentinfo{} @@ -39,6 +41,8 @@ func TestCheckin(t *testing.T) { mux.HandleFunc(path, authHandler(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusInternalServerError) fmt.Fprint(w, raw) + // Introduce a small delay to test the request time measurement. + time.Sleep(requestDelay) }, withAPIKey)) return mux }, withAPIKey, @@ -47,8 +51,10 @@ func TestCheckin(t *testing.T) { request := CheckinRequest{} - _, err := cmd.Execute(ctx, &request) + _, took, err := cmd.Execute(ctx, &request) require.Error(t, err) + // Ensure the request took at least as long as the artificial delay. + require.GreaterOrEqual(t, took, requestDelay) }, )) @@ -96,7 +102,7 @@ func TestCheckin(t *testing.T) { request := CheckinRequest{} - r, err := cmd.Execute(ctx, &request) + r, _, err := cmd.Execute(ctx, &request) require.NoError(t, err) require.Equal(t, 1, len(r.Actions)) @@ -157,7 +163,7 @@ func TestCheckin(t *testing.T) { request := CheckinRequest{} - r, err := cmd.Execute(ctx, &request) + r, _, err := cmd.Execute(ctx, &request) require.NoError(t, err) require.Equal(t, 2, len(r.Actions)) @@ -173,7 +179,7 @@ func TestCheckin(t *testing.T) { }, )) - t.Run("When we receive no action", withServerWithAuthClient( + t.Run("When we receive no action with delay", withServerWithAuthClient( func(t *testing.T) *http.ServeMux { raw := `{ "actions": [] }` mux := http.NewServeMux() @@ -189,7 +195,7 @@ func TestCheckin(t *testing.T) { request := CheckinRequest{} - r, err := cmd.Execute(ctx, &request) + r, _, err := cmd.Execute(ctx, &request) require.NoError(t, err) require.Equal(t, 0, len(r.Actions)) @@ -223,7 +229,7 @@ func TestCheckin(t *testing.T) { request := CheckinRequest{Metadata: testMetadata()} - r, err := cmd.Execute(ctx, &request) + r, _, err := cmd.Execute(ctx, &request) require.NoError(t, err) require.Equal(t, 0, len(r.Actions)) @@ -257,7 +263,7 @@ func TestCheckin(t *testing.T) { request := CheckinRequest{} - r, err := cmd.Execute(ctx, &request) + r, _, err := cmd.Execute(ctx, &request) require.NoError(t, err) require.Equal(t, 0, len(r.Actions)) From 12c55534a90e22ad4f641fcfef7090f99e3a1a75 Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Thu, 13 Oct 2022 01:40:06 -0400 Subject: [PATCH 165/180] [Automation] Update elastic stack version to 8.6.0-40086bc7 for testing (#1496) Co-authored-by: apmmachine --- testing/environments/snapshot.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/testing/environments/snapshot.yml b/testing/environments/snapshot.yml index 976d846eb52..d466b0bdc05 100644 --- a/testing/environments/snapshot.yml +++ b/testing/environments/snapshot.yml @@ -3,7 +3,7 @@ version: '2.3' services: elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:8.6.0-5a8d757d-SNAPSHOT + image: docker.elastic.co/elasticsearch/elasticsearch:8.6.0-40086bc7-SNAPSHOT # When extend is used it merges healthcheck.tests, see: # https://github.com/docker/compose/issues/8962 # healthcheck: @@ -42,7 +42,7 @@ services: - ./docker/logstash/pki:/etc/pki:ro kibana: - image: docker.elastic.co/kibana/kibana:8.6.0-5a8d757d-SNAPSHOT + image: docker.elastic.co/kibana/kibana:8.6.0-40086bc7-SNAPSHOT environment: -
"ELASTICSEARCH_USERNAME=kibana_system_user" - "ELASTICSEARCH_PASSWORD=testing" From d5cfe6ffb18335332f940aaacb0b0381c5f28b72 Mon Sep 17 00:00:00 2001 From: Andrew Gizas Date: Thu, 13 Oct 2022 10:04:53 +0300 Subject: [PATCH 166/180] Fixing makefile check (#1490) * Fixing makefile check --- deploy/kubernetes/Makefile | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/deploy/kubernetes/Makefile b/deploy/kubernetes/Makefile index 295b238cef5..1163b1d00a6 100644 --- a/deploy/kubernetes/Makefile +++ b/deploy/kubernetes/Makefile @@ -49,11 +49,10 @@ ci-clone-kibana-repository: cp $(FILE_REPO) $(ELASTIC_AGENT_REPO)/$(ELASTIC_AGENT_REPO_PATH) ## ci-create-kubernetes-templates-pull-request : Create the pull request for the kubernetes templates +$(eval HASDIFF =$(shell sh -c "git status | grep $(FILE_REPO) | wc -l")) .PHONY: ci-create-kubernetes-templates-pull-request ci-create-kubernetes-templates-pull-request: - HASDIFF=`git status | grep $(FILE_REPO) | wc -l`; \ - echo $${HASDIFF} -ifeq ($${HASDIFF},1) +ifeq ($(HASDIFF),1) echo "INFO: Create branch to update k8s templates" git config user.name obscloudnativemonitoring git config user.email obs-cloudnative-monitoring@elastic.co @@ -79,8 +78,7 @@ else --base main \ --head $(ELASTIC_AGENT_BRANCH) \ --reviewer elastic/obs-cloudnative-monitoring -endif - +endif else echo "No differences found with kibana git repository" endif From 259682d3eb95330619e1f0993ef35f46feb046fd Mon Sep 17 00:00:00 2001 From: Victor Martinez Date: Thu, 13 Oct 2022 09:16:59 +0100 Subject: [PATCH 167/180] action: validate changelog fragment (#1488) --- .github/workflows/changelog.yml | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) create mode 100644 .github/workflows/changelog.yml diff --git a/.github/workflows/changelog.yml b/.github/workflows/changelog.yml new file mode 100644 index 00000000000..d0f29a0fd25 --- /dev/null +++ b/.github/workflows/changelog.yml @@ -0,0 +1,17 @@ +name: Changelog +on: [pull_request] + +jobs: + fragments: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - name: Fetch Go version from .go-version + run: echo "GO_VERSION=$(cat .go-version)" >> $GITHUB_ENV + - uses: actions/setup-go@v3 + with: + go-version: ${{ env.GO_VERSION }} + - name: check pr-has-fragment + run: | + GOBIN=$PWD/bin go install github.com/elastic/elastic-agent-changelog-tool@latest + ./bin/elastic-agent-changelog-tool pr-has-fragment --repo ${{ github.event.repository.name }} ${{github.event.number}} From 5505f5862e158e926cc72fc331a30573f1c5dbe7 Mon Sep 17 00:00:00 2001 From: Andrew Gizas Date: Thu, 13 Oct 2022 11:51:35 +0300 Subject: [PATCH 168/180] Allign managed with standalone role (#1500) --- deploy/kubernetes/elastic-agent-standalone-kubernetes.yaml | 4 ++++ .../elastic-agent-standalone-role.yaml | 4 ++++ 2 files changed, 8 insertions(+) diff --git a/deploy/kubernetes/elastic-agent-standalone-kubernetes.yaml b/deploy/kubernetes/elastic-agent-standalone-kubernetes.yaml index e43a251408f..baf0ce00c94 100644 --- a/deploy/kubernetes/elastic-agent-standalone-kubernetes.yaml +++ b/deploy/kubernetes/elastic-agent-standalone-kubernetes.yaml @@ -864,6 +864,10 @@ rules: resources: - podsecuritypolicies verbs: ["get", "list", "watch"] + - apiGroups: [ "storage.k8s.io" ] + resources: + - storageclasses + verbs: [ "get", "list", "watch" ] --- apiVersion: rbac.authorization.k8s.io/v1 kind: Role diff --git a/deploy/kubernetes/elastic-agent-standalone/elastic-agent-standalone-role.yaml 
b/deploy/kubernetes/elastic-agent-standalone/elastic-agent-standalone-role.yaml index 8a644f3aadf..a0cd80b456a 100644 --- a/deploy/kubernetes/elastic-agent-standalone/elastic-agent-standalone-role.yaml +++ b/deploy/kubernetes/elastic-agent-standalone/elastic-agent-standalone-role.yaml @@ -63,6 +63,10 @@ rules: resources: - podsecuritypolicies verbs: ["get", "list", "watch"] + - apiGroups: [ "storage.k8s.io" ] + resources: + - storageclasses + verbs: [ "get", "list", "watch" ] --- apiVersion: rbac.authorization.k8s.io/v1 kind: Role From 890483300aba0fcd33714fa4fe9a81fb0bc4e533 Mon Sep 17 00:00:00 2001 From: Chris Mark Date: Thu, 13 Oct 2022 12:19:26 +0300 Subject: [PATCH 169/180] Fix k8s template link versioning (#1504) --- deploy/kubernetes/Makefile | 18 ++++++++++-------- .../elastic-agent-standalone-kubernetes.yaml | 2 +- .../elastic-agent-standalone-daemonset.yaml | 2 +- 3 files changed, 12 insertions(+), 10 deletions(-) diff --git a/deploy/kubernetes/Makefile b/deploy/kubernetes/Makefile index 1163b1d00a6..d7eb3241161 100644 --- a/deploy/kubernetes/Makefile +++ b/deploy/kubernetes/Makefile @@ -1,5 +1,6 @@ ALL=elastic-agent-standalone elastic-agent-managed BEAT_VERSION=$(shell head -n 1 ../../version/docs/version.asciidoc | cut -c 17- ) +BRANCH_VERSION=$(shell sed -n '2p' ../../version/docs/version.asciidoc | cut -c 14- ) #variables needed for ci-create-kubernetes-templates-pull-request ELASTIC_AGENT_REPO=kibana @@ -9,7 +10,7 @@ ELASTIC_AGENT_BRANCH=update-k8s-templates-$(shell date "+%Y%m%d%H%M%S") .PHONY: generate-k8s $(ALL) generate-k8s: $(ALL) - + test: generate-k8s for FILE in $(shell ls *-kubernetes.yaml); do \ BEAT=$$(echo $$FILE | cut -d \- -f 1); \ @@ -19,21 +20,21 @@ test: generate-k8s clean: @for f in $(ALL); do rm -f "$$f-kubernetes.yaml"; done -$(ALL): +$(ALL): ifdef WITHOUTCONFIG @echo "Generating $@-kubernetes-without-configmap.yaml" @rm -f $@-kubernetes-without-configmap.yaml @for f in $(shell ls $@/*.yaml | grep -v daemonset-configmap); do \ - sed "s/%VERSION%/VERSION/g" $$f >> $@-kubernetes-without-configmap.yaml; \ + sed -e "s/%VERSION%/VERSION/g" -e "s/%BRANCH%/${BRANCH_VERSION}/g" $$f >> $@-kubernetes-without-configmap.yaml; \ echo --- >> $@-kubernetes-without-configmap.yaml; \ done else - @echo "Generating $@-kubernetes.yaml" - @rm -f $@-kubernetes.yaml + @echo "Generating $@-kubernetes.yaml" + @rm -f $@-kubernetes.yaml @for f in $(shell ls $@/*.yaml); do \ - sed "s/%VERSION%/${BEAT_VERSION}/g" $$f >> $@-kubernetes.yaml; \ + sed -e "s/%VERSION%/${BEAT_VERSION}/g" -e "s/%BRANCH%/${BRANCH_VERSION}/g" $$f >> $@-kubernetes.yaml; \ echo --- >> $@-kubernetes.yaml; \ - done + done endif CHDIR_SHELL := $(SHELL) @@ -47,7 +48,7 @@ endef ci-clone-kibana-repository: git clone git@github.com:elastic/kibana.git cp $(FILE_REPO) $(ELASTIC_AGENT_REPO)/$(ELASTIC_AGENT_REPO_PATH) - + ## ci-create-kubernetes-templates-pull-request : Create the pull request for the kubernetes templates $(eval HASDIFF =$(shell sh -c "git status | grep $(FILE_REPO) | wc -l")) .PHONY: ci-create-kubernetes-templates-pull-request @@ -79,6 +80,7 @@ else --head $(ELASTIC_AGENT_BRANCH) \ --reviewer elastic/obs-cloudnative-monitoring endif + else echo "No differences found with kibana git repository" endif diff --git a/deploy/kubernetes/elastic-agent-standalone-kubernetes.yaml b/deploy/kubernetes/elastic-agent-standalone-kubernetes.yaml index baf0ce00c94..6de0d0b9270 100644 --- a/deploy/kubernetes/elastic-agent-standalone-kubernetes.yaml +++ b/deploy/kubernetes/elastic-agent-standalone-kubernetes.yaml 
@@ -662,7 +662,7 @@ spec: # - -c # - >- # mkdir -p /etc/elastic-agent/inputs.d && - # wget -O - https://github.com/elastic/elastic-agent/archive/8.3.0.tar.gz | tar xz -C /etc/elastic-agent/inputs.d --strip=5 "elastic-agent-main/deploy/kubernetes/elastic-agent-standalone/templates.d" + # wget -O - https://github.com/elastic/elastic-agent/archive/main.tar.gz | tar xz -C /etc/elastic-agent/inputs.d --strip=5 "elastic-agent-main/deploy/kubernetes/elastic-agent-standalone/templates.d" # volumeMounts: # - name: external-inputs # mountPath: /etc/elastic-agent/inputs.d diff --git a/deploy/kubernetes/elastic-agent-standalone/elastic-agent-standalone-daemonset.yaml b/deploy/kubernetes/elastic-agent-standalone/elastic-agent-standalone-daemonset.yaml index 9d865811e46..d40291d2ed1 100644 --- a/deploy/kubernetes/elastic-agent-standalone/elastic-agent-standalone-daemonset.yaml +++ b/deploy/kubernetes/elastic-agent-standalone/elastic-agent-standalone-daemonset.yaml @@ -34,7 +34,7 @@ spec: # - -c # - >- # mkdir -p /etc/elastic-agent/inputs.d && - # wget -O - https://github.com/elastic/elastic-agent/archive/%VERSION%.tar.gz | tar xz -C /etc/elastic-agent/inputs.d --strip=5 "elastic-agent-main/deploy/kubernetes/elastic-agent-standalone/templates.d" + # wget -O - https://github.com/elastic/elastic-agent/archive/%BRANCH%.tar.gz | tar xz -C /etc/elastic-agent/inputs.d --strip=5 "elastic-agent-main/deploy/kubernetes/elastic-agent-standalone/templates.d" # volumeMounts: # - name: external-inputs # mountPath: /etc/elastic-agent/inputs.d From 7f5450b9843de1b5d9aa725faff2d5661735c162 Mon Sep 17 00:00:00 2001 From: Andrew Gizas Date: Thu, 13 Oct 2022 13:32:31 +0300 Subject: [PATCH 170/180] Aligning manifests (#1507) * Align managed with standalone role * Fixing missing Label --- deploy/kubernetes/Makefile | 1 - deploy/kubernetes/elastic-agent-managed-kubernetes.yaml | 2 +- .../elastic-agent-managed/elastic-agent-managed-daemonset.yaml | 2 +- deploy/kubernetes/elastic-agent-standalone-kubernetes.yaml | 2 +- .../elastic-agent-standalone-daemonset-configmap.yaml | 2 +- 5 files changed, 4 insertions(+), 5 deletions(-) diff --git a/deploy/kubernetes/Makefile b/deploy/kubernetes/Makefile index d7eb3241161..98e216142b7 100644 --- a/deploy/kubernetes/Makefile +++ b/deploy/kubernetes/Makefile @@ -75,7 +75,6 @@ else --title "Update kubernetes templates for elastic-agent" \ --body "Automated by ${BUILD_URL}" \ --label automation \ - --label release_note:automation \ --base main \ --head $(ELASTIC_AGENT_BRANCH) \ --reviewer elastic/obs-cloudnative-monitoring diff --git a/deploy/kubernetes/elastic-agent-managed-kubernetes.yaml b/deploy/kubernetes/elastic-agent-managed-kubernetes.yaml index 1f3c3d8ec9b..3a41910c51a 100644 --- a/deploy/kubernetes/elastic-agent-managed-kubernetes.yaml +++ b/deploy/kubernetes/elastic-agent-managed-kubernetes.yaml @@ -1,4 +1,4 @@ -# For more information refer to https://www.elastic.co/guide/en/fleet/current/running-on-kubernetes-managed-by-fleet.html +# For more information https://www.elastic.co/guide/en/fleet/current/running-on-kubernetes-managed-by-fleet.html apiVersion: apps/v1 kind: DaemonSet metadata: diff --git a/deploy/kubernetes/elastic-agent-managed/elastic-agent-managed-daemonset.yaml b/deploy/kubernetes/elastic-agent-managed/elastic-agent-managed-daemonset.yaml index 17959a4febe..e1b85082ac3 100644 --- a/deploy/kubernetes/elastic-agent-managed/elastic-agent-managed-daemonset.yaml +++
b/deploy/kubernetes/elastic-agent-managed/elastic-agent-managed-daemonset.yaml @@ -1,4 +1,4 @@ -# For more information refer to https://www.elastic.co/guide/en/fleet/current/running-on-kubernetes-managed-by-fleet.html +# For more information https://www.elastic.co/guide/en/fleet/current/running-on-kubernetes-managed-by-fleet.html apiVersion: apps/v1 kind: DaemonSet metadata: diff --git a/deploy/kubernetes/elastic-agent-standalone-kubernetes.yaml b/deploy/kubernetes/elastic-agent-standalone-kubernetes.yaml index 6de0d0b9270..373282a4c1b 100644 --- a/deploy/kubernetes/elastic-agent-standalone-kubernetes.yaml +++ b/deploy/kubernetes/elastic-agent-standalone-kubernetes.yaml @@ -1,4 +1,4 @@ -# For more information refer https://www.elastic.co/guide/en/fleet/current/running-on-kubernetes-standalone.html +# For more information https://www.elastic.co/guide/en/fleet/current/running-on-kubernetes-standalone.html apiVersion: v1 kind: ConfigMap metadata: diff --git a/deploy/kubernetes/elastic-agent-standalone/elastic-agent-standalone-daemonset-configmap.yaml b/deploy/kubernetes/elastic-agent-standalone/elastic-agent-standalone-daemonset-configmap.yaml index 15a24fc3c59..1a52302826d 100644 --- a/deploy/kubernetes/elastic-agent-standalone/elastic-agent-standalone-daemonset-configmap.yaml +++ b/deploy/kubernetes/elastic-agent-standalone/elastic-agent-standalone-daemonset-configmap.yaml @@ -1,4 +1,4 @@ -# For more information refer https://www.elastic.co/guide/en/fleet/current/running-on-kubernetes-standalone.html +# For more information https://www.elastic.co/guide/en/fleet/current/running-on-kubernetes-standalone.html apiVersion: v1 kind: ConfigMap metadata: From 35f12d52f77b013156acc876665a06cc17c9f95c Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Fri, 14 Oct 2022 01:37:34 -0400 Subject: [PATCH 171/180] [Automation] Update elastic stack version to 8.6.0-233dc5d4 for testing (#1515) Co-authored-by: apmmachine --- testing/environments/snapshot.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/testing/environments/snapshot.yml b/testing/environments/snapshot.yml index d466b0bdc05..41048fde8fc 100644 --- a/testing/environments/snapshot.yml +++ b/testing/environments/snapshot.yml @@ -3,7 +3,7 @@ version: '2.3' services: elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:8.6.0-40086bc7-SNAPSHOT + image: docker.elastic.co/elasticsearch/elasticsearch:8.6.0-233dc5d4-SNAPSHOT # When extend is used it merges healthcheck.tests, see: # https://github.com/docker/compose/issues/8962 # healthcheck: @@ -42,7 +42,7 @@ services: - ./docker/logstash/pki:/etc/pki:ro kibana: - image: docker.elastic.co/kibana/kibana:8.6.0-40086bc7-SNAPSHOT + image: docker.elastic.co/kibana/kibana:8.6.0-233dc5d4-SNAPSHOT environment: - "ELASTICSEARCH_USERNAME=kibana_system_user" - "ELASTICSEARCH_PASSWORD=testing" From 6eadeeff9a8a4ae043d053308deebee07d2f0175 Mon Sep 17 00:00:00 2001 From: Edoardo Tenani <526307+endorama@users.noreply.github.com> Date: Fri, 14 Oct 2022 09:53:20 +0200 Subject: [PATCH 172/180] Convert CHANGELOG.next to fragments (#1244) --- CHANGELOG.next.asciidoc | 213 ------------------ README.md | 1 + ...ion-when-installing-the-Elastic-Agent.yaml | 3 + ...SHA-1-are-now-rejected-See-the-Go-118.yaml | 3 + ...rjack-input-type-to-the-Filebeat-spec.yaml | 3 + ...-autodiscovery-in-kubernetes-provider.yaml | 3 + ...ource-URI-when-downloading-components.yaml | 3 + ...nly-events-so-that-degraded-fleet-che.yaml | 4 + 
...30732-Improve-logging-during-upgrades.yaml | 3 + ...ssage-after-previous-checkin-failures.yaml | 3 + 10 files changed, 26 insertions(+), 213 deletions(-) delete mode 100644 CHANGELOG.next.asciidoc create mode 100644 changelog/fragments/1660139385-Fix-a-panic-caused-by-a-race-condition-when-installing-the-Elastic-Agent.yaml create mode 100644 changelog/fragments/1660158319-Upgrade-to-Go-118-Certificates-signed-with-SHA-1-are-now-rejected-See-the-Go-118.yaml create mode 100644 changelog/fragments/1661188787-Add-lumberjack-input-type-to-the-Filebeat-spec.yaml create mode 100644 changelog/fragments/1663143487-Add-support-for-hints-based-autodiscovery-in-kubernetes-provider.yaml create mode 100644 changelog/fragments/1664177394-Fix-unintended-reset-of-source-URI-when-downloading-components.yaml create mode 100644 changelog/fragments/1664212969-Create-separate-status-reporter-for-local-only-events-so-that-degraded-fleet-che.yaml create mode 100644 changelog/fragments/1664230732-Improve-logging-during-upgrades.yaml create mode 100644 changelog/fragments/1664360554-Add-success-log-message-after-previous-checkin-failures.yaml diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc deleted file mode 100644 index e937813e86d..00000000000 --- a/CHANGELOG.next.asciidoc +++ /dev/null @@ -1,213 +0,0 @@ -// Use these for links to issue and pulls. Note issues and pulls redirect one to -// each other on Github, so don't worry too much on using the right prefix. -:issue-beats: https://github.com/elastic/beats/issues/ -:pull-beats: https://github.com/elastic/beats/pull/ - -:issue: https://github.com/elastic/elastic-agent/issues/ -:pull: https://github.com/elastic/elastic-agent/pull/ - -=== Elastic Agent version HEAD - -==== Breaking changes - -- Docker container is not run as root by default. {pull-beats}[21213] -- Read Fleet connection information from `fleet.*` instead of `fleet.kibana.*`. {pull-beats}[24713] -- Beats build for 32Bit Windows or Linux system will refuse to run on a 64bit system. {pull-beats}[25186] -- Remove the `--kibana-url` from `install` and `enroll` command. {pull-beats}[25529] -- Default to port 80 and 443 for Kibana and Fleet Server connections. {pull-beats}[25723] -- Remove deprecated/undocumented IncludeCreatorMetadata setting from kubernetes metadata config options {pull-beats}[28006] -- The `/processes/` endpoint proxies to the subprocess's monitoring endpoint, instead of querying its `/stats` endpoint {pull-beats}[28165] -- Remove username/password for fleet-server authentication. {pull-beats}[29458] -- Upgrade to Go 1.18. Certificates signed with SHA-1 are now rejected. See the Go 1.18 https://tip.golang.org/doc/go1.18#sha1[release notes] for details. {pull}832[832] - -==== Bugfixes -- Fix rename *ConfigChange to *PolicyChange to align on changes in the UI. 
{pull-beats}[20779] -- Thread safe sorted set {pull-beats}[21290] -- Copy Action store on upgrade {pull-beats}[21298] -- Include inputs in action store actions {pull-beats}[21298] -- Fix issue where inputs without processors defined would panic {pull-beats}[21628] -- Prevent reporting ecs version twice {pull-beats}[21616] -- Partial extracted beat result in failure to spawn beat {issue-beats}[21718] -- Use symlink path for reexecutions {pull-beats}[21835] -- Use ML_SYSTEM to detect if agent is running as a service {pull-beats}[21884] -- Use local temp instead of system one {pull-beats}[21883] -- Rename monitoring index from `elastic.agent` to `elastic_agent` {pull-beats}[21932] -- Fix issue with named pipes on Windows 7 {pull-beats}[21931] -- Fix missing elastic_agent event data {pull-beats}[21994] -- Ensure shell wrapper path exists before writing wrapper on install {pull-beats}[22144] -- Fix deb/rpm packaging for Elastic Agent {pull-beats}[22153] -- Fix composable input processor promotion to fix duplicates {pull-beats}[22344] -- Fix sysv init files for deb/rpm installation {pull-beats}[22543] -- Fix shell wrapper for deb/rpm packaging {pull-beats}[23038] -- Fixed parsing of npipe URI {pull-beats}[22978] -- Select default agent policy if no enrollment token provided. {pull-beats}[23973] -- Remove artifacts on transient download errors {pull-beats}[23235] -- Support for linux/arm64 {pull-beats}[23479] -- Skip top level files when unziping archive during upgrade {pull-beats}[23456] -- Do not take ownership of Endpoint log path {pull-beats}[23444] -- Fixed fetching DBus service PID {pull-beats}[23496] -- Fix issue of missing log messages from filebeat monitor {pull-beats}[23514] -- Increase checkin grace period to 30 seconds {pull-beats}[23568] -- Fix libbeat from reporting back degraded on config update {pull-beats}[23537] -- Rewrite check if agent is running with admin rights on Windows {pull-beats}[23970] -- Fix issues with dynamic inputs and conditions {pull-beats}[23886] -- Fix bad substitution of API key. {pull-beats}[24036] -- Fix docker enrollment issue related to Fleet Server change. {pull-beats}[24155] -- Improve log on failure of Endpoint Security installation. {pull-beats}[24429] -- Verify communication to Kibana before updating Fleet client. {pull-beats}[24489] -- Fix nil pointer when null is generated as list item. {issue-beats}[23734] -- Add support for filestream input. {pull-beats}[24820] -- Add check for URL set when cert and cert key. 
{pull-beats}[24904] -- Fix install command for Fleet Server bootstrap, remove need for --enrollment-token when using --fleet-server {pull-beats}[24981] -- Respect host configuration for exposed processes endpoint {pull-beats}[25114] -- Set --inscure in container when FLEET_SERVER_ENABLE and FLEET_INSECURE set {pull-beats}[25137] -- Fixed: limit for retries to Kibana configurable {issue-beats}[25063] -- Fix issue with status and inspect inside of container {pull-beats}[25204] -- Remove FLEET_SERVER_POLICY_NAME env variable as it was not used {pull-beats}[25149] -- Reduce log level for listener cleanup to debug {pull-beats} -- Passing in policy id to container command works {pull-beats}[25352] -- Reduce log level for listener cleanup to debug {pull-beats}[25274] -- Delay the restart of application when a status report of failure is given {pull-beats}[25339] -- Don't log when upgrade capability doesn't apply {pull-beats}[25386] -- Fixed issue when unversioned home is set and invoked watcher failing with ENOENT {issue-beats}[25371] -- Fixed Elastic Agent: expecting Dict and received *transpiler.Key for '0' {issue-beats}[24453] -- Fix AckBatch to do nothing when no actions passed {pull-beats}[25562] -- Add error log entry when listener creation fails {issue-beats}[23482] -- Handle case where policy doesn't contain Fleet connection information {pull-beats}[25707] -- Fix fleet-server.yml spec to not overwrite existing keys {pull-beats}[25741] -- Agent sends wrong log level to Endpoint {issue-beats}[25583] -- Fix startup with failing configuration {pull-beats}[26057] -- Change timestamp in elatic-agent-json.log to use UTC {issue-beats}[25391] -- Fix add support for Logstash output. {pull-beats}[24305] -- Do not log Elasticsearch configuration for monitoring output when running with debug. {pull-beats}[26583] -- Fix issue where proxy enrollment options broke enrollment command. {pull-beats}[26749] -- Remove symlink.prev from previously failed upgrade {pull-beats}[26785] -- Fix apm-server supported outputs not being in sync with supported output types. {pull-beats}[26885] -- Set permissions during installation {pull-beats}[26665] -- Disable monitoring during fleet-server bootstrapping. {pull-beats}[27222] -- Fix issue with atomic extract running in K8s {pull-beats}[27396] -- Fix issue with install directory in state path in K8s {pull-beats}[27396] -- Disable monitoring during fleet-server bootstrapping. {pull-beats}[27222] -- Change output.elasticsearch.proxy_disabled flag to output.elasticsearch.proxy_disable so fleet uses it. {issue-beats}[27670] {pull-beats}[27671] -- Add validation for certificate flags to ensure they are absolute paths. {pull-beats}[27779] -- Migrate state on upgrade {pull-beats}[27825] -- Add "_monitoring" suffix to monitoring instance names to remove ambiguity with the status command. {issue-beats}[25449] -- Ignore ErrNotExists when fixing permissions. {issue-beats}[27836] {pull-beats}[27846] -- Snapshot artifact lookup will use agent.download proxy settings. {issue-beats}[27903] {pull-beats}[27904] -- Fix lazy acker to only add new actions to the batch. {pull-beats}[27981] -- Allow HTTP metrics to run in bootstrap mode. Add ability to adjust timeouts for Fleet Server. {pull-beats}[28260] -- Fix agent configuration overwritten by default fleet config. {pull-beats}[29297] -- Allow agent containers to use basic auth to create a service token. {pull-beats}[29651] -- Fix issue where a failing artifact verification does not remove the bad artifact. 
{pull-beats}[30281] -- Reduce Elastic Agent shut down time by stopping processes concurrently {pull-beats}[29650] -- Move `context cancelled` error from fleet gateway into debug level. {pull}187[187] -- Update library containerd to 1.5.10. {pull}186[186] -- Add fleet-server to output of elastic-agent inspect output command (and diagnostic bundle). {pull}243[243] -- Update API calls that the agent makes to Kibana when running the container command. {pull}253[253] -- diagnostics collect log names are fixed on Windows machines, command will ignore failures. AgentID is included in diagnostics(and diagnostics collect) output. {issue}81[81] {issue}92[92] {issue}190[190] {pull}262[262] -- Collects stdout and stderr of applications run as a process and logs them. {issue}[88] -- Remove VerificationMode option to empty string. Default value is `full`. {issue}[184] -- diagnostics collect file mod times are set. {pull}570[570] -- Allow ':' characters in dynamic variables {issue}624[624] {pull}680[680] -- Allow the - char to appear as part of variable names in eql expressions. {issue}709[709] {pull}710[710] -- Allow the / char in variable names in eql and transpiler. {issue}715[715] {pull}718[718] -- Fix data duplication for standalone agent on Kubernetes using the default manifest {issue-beats}31512[31512] {pull}742[742] -- Agent updates will clean up unneeded artifacts. {issue}693[693] {issue}694[694] {pull}752[752] -- Use the Elastic Agent configuration directory as the root of the `inputs.d` folder. {issues}663[663] -- Fix a panic caused by a race condition when installing the Elastic Agent. {issues}806[806] -- Use at least warning level for all status logs {pull}1218[1218] -- Remove fleet event reporter and events from checkin body. {issue}993[993] -- Fix unintended reset of source URI when downloading components {pull}1252[1252] -- Create separate status reporter for local only events so that degraded fleet-checkins no longer affect health on successful fleet-checkins. {issue}1157[1157] {pull}1285[1285] -- Add success log message after previous checkin failures {pull}1327[1327] -- Fix inconsistency between kubernetes pod annotations and labels in autodiscovery templates {pull}1327[1327] -- Add permissions to elastic-agent-managed clusterrole to get, list, watch storageclasses {pull}1470[1470] -==== New features - -- Prepare packaging for endpoint and asc files {pull-beats}[20186] -- Improved version CLI {pull-beats}[20359] -- Enroll CLI now restarts running daemon {pull-beats}[20359] -- Add restart CLI cmd {pull-beats}[20359] -- Add new `synthetics/*` inputs to run Heartbeat {pull-beats}[20387] -- Users of the Docker image can now pass `FLEET_ENROLL_INSECURE=1` to include the `--insecure` flag with the `elastic-agent enroll` command {issue-beats}[20312] {pull-beats}[20713] -- Add `docker` composable dynamic provider. {pull-beats}[20842] -- Add support for dynamic inputs with providers and `{{variable|"default"}}` substitution. {pull-beats}[20839] -- Add support for EQL based condition on inputs {pull-beats}[20994] -- Send `fleet.host.id` to Endpoint Security {pull-beats}[21042] -- Add `install` and `uninstall` subcommands {pull-beats}[21206] -- Use new form of fleet API paths {pull-beats}[21478] -- Add `kubernetes` composable dynamic provider. 
{pull-beats}[21480] -- Send updating state {pull-beats}[21461] -- Add `elastic.agent.id` and `elastic.agent.version` to published events from filebeat and metricbeat {pull-beats}[21543] -- Add `upgrade` subcommand to perform upgrade of installed Elastic Agent {pull-beats}[21425] -- Update `fleet.yml` and Kibana hosts when a policy change updates the Kibana hosts {pull-beats}[21599] -- Update `install` command to perform enroll before starting Elastic Agent {pull-beats}[21772] -- Update `fleet.kibana.path` from a POLICY_CHANGE {pull-beats}[21804] -- Removed `install-service.ps1` and `uninstall-service.ps1` from Windows .zip packaging {pull-beats}[21694] -- Add `priority` to `AddOrUpdate` on dynamic composable input providers communication channel {pull-beats}[22352] -- Ship `endpoint-security` logs to elasticsearch {pull-beats}[22526] -- Log level reloadable from fleet {pull-beats}[22690] -- Push log level downstream {pull-beats}[22815] -- Add metrics collection for Agent {pull-beats}[22793] -- Add support for Fleet Server {pull-beats}[23736] -- Add support for enrollment with local bootstrap of Fleet Server {pull-beats}[23865] -- Add TLS support for Fleet Server {pull-beats}[24142] -- Add support for Fleet Server running under Elastic Agent {pull-beats}[24220] -- Add CA support to Elastic Agent docker image {pull-beats}[24486] -- Add k8s secrets provider for Agent {pull-beats}[24789] -- Add STATE_PATH, CONFIG_PATH, LOGS_PATH to Elastic Agent docker image {pull-beats}[24817] -- Add status subcommand {pull-beats}[24856] -- Add leader_election provider for k8s {pull-beats}[24267] -- Add --fleet-server-service-token and FLEET_SERVER_SERVICE_TOKEN options {pull-beats}[25083] -- Keep http and logging config during enroll {pull-beats}[25132] -- Log output of container to $LOGS_PATH/elastic-agent-start.log when LOGS_PATH set {pull-beats}[25150] -- Use `filestream` input for internal log collection. {pull-beats}[25660] -- Enable agent to send custom headers to kibana/ES {pull-beats}[26275] -- Set `agent.id` to the Fleet Agent ID in events published from inputs backed by Beats. {issue-beats}[21121] {pull-beats}[26394] {pull-beats}[26548] -- Add proxy support to artifact downloader and communication with fleet server. {pull-beats}[25219] -- Add proxy support to enroll command. {pull-beats}[26514] -- Enable configuring monitoring namespace {issue-beats}[26439] -- Communicate with Fleet Server over HTTP2. {pull-beats}[26474] -- Pass logging.metrics.enabled to beats to stop beats from adding metrics into their logs. {issue-beats}[26758] {pull-beats}[26828] -- Support Node and Service autodiscovery in kubernetes dynamic provider. {pull-beats}[26801] -- Increase Agent's mem limits in k8s. {pull-beats}[27153] -- Add new --enroll-delay option for install and enroll commands. {pull-beats}[27118] -- Add link to troubleshooting guide on fatal exits. {issue-beats}[26367] {pull-beats}[27236] -- Agent now adapts the beats queue size based on output settings. {issue-beats}[26638] {pull-beats}[27429] -- Support ephemeral containers in Kubernetes dynamic provider. {issue-beats}[#27020] {pull-beats}[27707] -- Add complete k8s metadata through composable provider. {pull-beats}[27691] -- Add diagnostics command to gather beat metadata. {pull-beats}[28265] -- Add diagnostics collect command to gather beat metadata, config, policy, and logs and bundle it into an archive. {pull-beats}[28461] -- Add `KIBANA_FLEET_SERVICE_TOKEN` to Elastic Agent container. {pull-beats}[28096] -- Enable pprof endpoints for beats processes. 
Allow pprof endpoints for elastic-agent if enabled. {pull-beats}[28983] -- Add `--pprof` flag to `elastic-agent diagnostics` and an `elastic-agent pprof` command to allow operators to gather pprof data from the agent and beats running under it. {pull-beats}[28798] -- Allow pprof endpoints for elastic-agent or beats if enabled. {pull-beats}[28983] {pull-beats}[29155] -- Add --fleet-server-es-ca-trusted-fingerprint flag to allow agent/fleet-server to work with elasticsearch clusters using self signed certs. {pull-beats}[29128] -- Discover changes in Kubernetes nodes metadata as soon as they happen. {pull-beats}[23139] -- Add results of inspect output command into archive produced by diagnostics collect. {pull-beats}[29902] -- Add support for loading input configuration from external configuration files in standalone mode. You can load inputs from YAML configuration files under the folder `{path.config}/inputs.d`. {pull-beats}[30087] -- Install command will skip install/uninstall steps when installation via package is detected on Linux distros. {pull-beats}[30289] -- Update docker/distribution dependency library to fix a security issues concerning OCI Manifest Type Confusion Issue. {pull-beats}[30462] -- Add action_input_type for the .fleet-actions-results {pull-beats}[30562] -- Add support for enabling the metrics buffer endpoint in the elastic-agent and beats it runs. diagnostics collect command will gather metrics-buffer data if enabled. {pull-beats}[30471] -- Update ack response schema and processing, add retrier for acks {pull}200[200] -- Enhance error messages and logs for process start {pull}225[225] -- Changed the default policy selection logic. When the agent has no policy id or name defined, it will fall back to defaults (defined by $FLEET_SERVER_POLICY_ID and $FLEET_DEFAULT_TOKEN_POLICY_NAME environment variables respectively). {issue-beats}[29774] {pull}226[226] -- Add Elastic APM instrumentation {pull}180[180] -- Agent can be built for `darwin/arm64`. When it's built for both `darwin/arm64` and `darwin/adm64` a universal binary is also built and packaged. {pull}203[203] -- Add support for Cloudbeat. {pull}179[179] -- Fix download verification in snapshot builds. {issue}252[252] -- Add support for kubernetes cronjobs {pull}279[279] -- Increase the download artifact timeout to 10mins and add log download statistics. {pull}308[308] -- Save the agent configuration and the state encrypted on the disk. {issue}535[535] {pull}398[398] -- Bump node.js version for heartbeat/synthetics to 16.15.0 -- Support scheduled actions and cancellation of pending actions. {issue}393[393] {pull}419[419] -- Add `@metadata.input_id` and `@metadata.stream_id` when applying the inject stream processor {pull}527[527] -- Add liveness endpoint, allow fleet-gateway component to report degraded state, add update time and messages to status output. {issue}390[390] {pull}569[569] -- Redact sensitive information on diagnostics collect command. {issue}[241] {pull}[566] -- Fix incorrectly creating a filebeat redis input when a policy contains a packetbeat redis input. {issue}[427] {pull}[700] -- Add `lumberjack` input type to the Filebeat spec. {pull}[959] -- Add support for hints' based autodiscovery in kubernetes provider. {pull}[698] -- Improve logging during upgrades. {pull}[1287] -- Added status message to CheckinRequest {pull}[1369] -- Improve logging of Fleet checkins errors. 
{pull}[1477] diff --git a/README.md b/README.md index b1f581b38bb..5b1f5c01b04 100644 --- a/README.md +++ b/README.md @@ -11,6 +11,7 @@ Prerequisites: - installed [mage](https://github.com/magefile/mage) - [Docker](https://docs.docker.com/get-docker/) - [X-pack](https://github.com/elastic/beats/tree/main/x-pack) to pre-exist in the parent folder of the local Git repository checkout +- [elastic-agent-changelog-tool](https://github.com/elastic/elastic-agent-changelog-tool) to add changelog fragments for changelog generation If you are on a Mac with M1 chip, don't forget to export some docker variable to be able to build for AMD ``` diff --git a/changelog/fragments/1660139385-Fix-a-panic-caused-by-a-race-condition-when-installing-the-Elastic-Agent.yaml b/changelog/fragments/1660139385-Fix-a-panic-caused-by-a-race-condition-when-installing-the-Elastic-Agent.yaml new file mode 100644 index 00000000000..19844fe2dfc --- /dev/null +++ b/changelog/fragments/1660139385-Fix-a-panic-caused-by-a-race-condition-when-installing-the-Elastic-Agent.yaml @@ -0,0 +1,3 @@ +kind: bug-fix +summary: Fix a panic caused by a race condition when installing the Elastic Agent. +pr: https://github.com/elastic/elastic-agent/pull/823 diff --git a/changelog/fragments/1660158319-Upgrade-to-Go-118-Certificates-signed-with-SHA-1-are-now-rejected-See-the-Go-118.yaml b/changelog/fragments/1660158319-Upgrade-to-Go-118-Certificates-signed-with-SHA-1-are-now-rejected-See-the-Go-118.yaml new file mode 100644 index 00000000000..f7b6ce903d3 --- /dev/null +++ b/changelog/fragments/1660158319-Upgrade-to-Go-118-Certificates-signed-with-SHA-1-are-now-rejected-See-the-Go-118.yaml @@ -0,0 +1,3 @@ +kind: breaking-change +summary: Upgrade to Go 1.18. Certificates signed with SHA-1 are now rejected. See the Go 1.18 https://tip.golang.org/doc/go1.18#sha1[release notes] for details. +pr: https://github.com/elastic/elastic-agent/pull/832 diff --git a/changelog/fragments/1661188787-Add-lumberjack-input-type-to-the-Filebeat-spec.yaml b/changelog/fragments/1661188787-Add-lumberjack-input-type-to-the-Filebeat-spec.yaml new file mode 100644 index 00000000000..9110968e91f --- /dev/null +++ b/changelog/fragments/1661188787-Add-lumberjack-input-type-to-the-Filebeat-spec.yaml @@ -0,0 +1,3 @@ +kind: feature +summary: Add `lumberjack` input type to the Filebeat spec. +pr: https://github.com/elastic/elastic-agent/pull/959 diff --git a/changelog/fragments/1663143487-Add-support-for-hints-based-autodiscovery-in-kubernetes-provider.yaml b/changelog/fragments/1663143487-Add-support-for-hints-based-autodiscovery-in-kubernetes-provider.yaml new file mode 100644 index 00000000000..04e84669955 --- /dev/null +++ b/changelog/fragments/1663143487-Add-support-for-hints-based-autodiscovery-in-kubernetes-provider.yaml @@ -0,0 +1,3 @@ +kind: feature +summary: Add support for hints' based autodiscovery in kubernetes provider.
+pr: https://github.com/elastic/elastic-agent/pull/698 diff --git a/changelog/fragments/1664177394-Fix-unintended-reset-of-source-URI-when-downloading-components.yaml b/changelog/fragments/1664177394-Fix-unintended-reset-of-source-URI-when-downloading-components.yaml new file mode 100644 index 00000000000..b5712f4c193 --- /dev/null +++ b/changelog/fragments/1664177394-Fix-unintended-reset-of-source-URI-when-downloading-components.yaml @@ -0,0 +1,3 @@ +kind: bug-fix +summary: Fix unintended reset of source URI when downloading components +pr: https://github.com/elastic/elastic-agent/pull/1252 diff --git a/changelog/fragments/1664212969-Create-separate-status-reporter-for-local-only-events-so-that-degraded-fleet-che.yaml b/changelog/fragments/1664212969-Create-separate-status-reporter-for-local-only-events-so-that-degraded-fleet-che.yaml new file mode 100644 index 00000000000..a94f5b66751 --- /dev/null +++ b/changelog/fragments/1664212969-Create-separate-status-reporter-for-local-only-events-so-that-degraded-fleet-che.yaml @@ -0,0 +1,4 @@ +kind: bug-fix +summary: Create separate status reporter for local only events so that degraded fleet-checkins no longer affect health on successful fleet-checkins. +issue: https://github.com/elastic/elastic-agent/issues/1157 +pr: https://github.com/elastic/elastic-agent/pull/1285 diff --git a/changelog/fragments/1664230732-Improve-logging-during-upgrades.yaml b/changelog/fragments/1664230732-Improve-logging-during-upgrades.yaml new file mode 100644 index 00000000000..15f81e7d5ad --- /dev/null +++ b/changelog/fragments/1664230732-Improve-logging-during-upgrades.yaml @@ -0,0 +1,3 @@ +kind: feature +summary: Improve logging during upgrades. +pr: https://github.com/elastic/elastic-agent/pull/1287 diff --git a/changelog/fragments/1664360554-Add-success-log-message-after-previous-checkin-failures.yaml b/changelog/fragments/1664360554-Add-success-log-message-after-previous-checkin-failures.yaml new file mode 100644 index 00000000000..3e4ac3d91a5 --- /dev/null +++ b/changelog/fragments/1664360554-Add-success-log-message-after-previous-checkin-failures.yaml @@ -0,0 +1,3 @@ +kind: bug-fix +summary: Add success log message after previous checkin failures +pr: https://github.com/elastic/elastic-agent/pull/1327 From 9c6a43bbb8273467b249dc8c75ae16335f37f535 Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Mon, 17 Oct 2022 01:37:30 -0400 Subject: [PATCH 173/180] [Automation] Update elastic stack version to 8.6.0-54a302f0 for testing (#1531) Co-authored-by: apmmachine --- testing/environments/snapshot.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/testing/environments/snapshot.yml b/testing/environments/snapshot.yml index 41048fde8fc..df54d740103 100644 --- a/testing/environments/snapshot.yml +++ b/testing/environments/snapshot.yml @@ -3,7 +3,7 @@ version: '2.3' services: elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:8.6.0-233dc5d4-SNAPSHOT + image: docker.elastic.co/elasticsearch/elasticsearch:8.6.0-54a302f0-SNAPSHOT # When extend is used it merges healthcheck.tests, see: # https://github.com/docker/compose/issues/8962 # healthcheck: @@ -42,7 +42,7 @@ services: - ./docker/logstash/pki:/etc/pki:ro kibana: - image: docker.elastic.co/kibana/kibana:8.6.0-233dc5d4-SNAPSHOT + image: docker.elastic.co/kibana/kibana:8.6.0-54a302f0-SNAPSHOT environment: - "ELASTICSEARCH_USERNAME=kibana_system_user" - 
"ELASTICSEARCH_PASSWORD=testing" From e64ea9ace444e6ba80d104e90981ea558b215d0f Mon Sep 17 00:00:00 2001 From: Craig MacKenzie Date: Mon, 17 Oct 2022 08:45:06 -0400 Subject: [PATCH 174/180] Update the linter configuration. (#1478) Sync the configuration with the one used in Beats, which has disabled the majority of the least useful linters already. --- .github/workflows/golangci-lint.yml | 8 +- .golangci.yml | 110 +++++++++++----------------- 2 files changed, 48 insertions(+), 70 deletions(-) diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml index 8079fe1c673..62d4006737c 100644 --- a/.github/workflows/golangci-lint.yml +++ b/.github/workflows/golangci-lint.yml @@ -18,22 +18,22 @@ jobs: name: lint runs-on: ${{ matrix.os }} steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 # Uses Go version from the repository. - name: Read .go-version file id: goversion run: echo "::set-output name=version::$(cat .go-version)" - - uses: actions/setup-go@v2 + - uses: actions/setup-go@v3 with: go-version: "${{ steps.goversion.outputs.version }}" - name: golangci-lint - uses: golangci/golangci-lint-action@v2 + uses: golangci/golangci-lint-action@v3 with: # Optional: version of golangci-lint to use in form of v1.2 or v1.2.3 or `latest` to use the latest version - version: v1.45.2 + version: v1.47.2 # Give the job more time to execute. # Regarding `--whole-files`, the linter is supposed to support linting of changed a patch only but, diff --git a/.golangci.yml b/.golangci.yml index 956b4b4b573..96e131c8ade 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -12,46 +12,37 @@ issues: # Set to 0 to disable. # Default: 50 max-issues-per-linter: 0 + exclude-rules: + # Exclude package name contains '-' issue because we have at least one package with + # it on its name. + - text: "ST1003:" + linters: + - stylecheck + # From mage we are priting to the console to ourselves + - path: (.*magefile.go|.*dev-tools/mage/.*) + linters: + - forbidigo output: sort-results: true -# Uncomment and add a path if needed to exclude -# skip-dirs: -# - some/path -# skip-files: -# - ".*\\.my\\.go$" -# - lib/bad.go - # Find the whole list here https://golangci-lint.run/usage/linters/ linters: disable-all: true enable: - - deadcode # finds unused code - errcheck # checking for unchecked errors in go programs - errorlint # errorlint is a linter for that can be used to find code that will cause problems with the error wrapping scheme introduced in Go 1.13. - - goconst # finds repeated strings that could be replaced by a constant - - dupl # tool for code clone detection - forbidigo # forbids identifiers matched by reg exps - # 'replace' is used in go.mod for many dependencies that come from libbeat. We should work to remove those, - # so we can re-enable this linter. - # - gomoddirectives # manage the use of 'replace', 'retract', and 'excludes' directives in go.mod. 
- - gomodguard - gosimple # linter for Go source code that specializes in simplifying a code - misspell # finds commonly misspelled English words in comments - nakedret # finds naked returns in functions greater than a specified function length - - prealloc # finds slice declarations that could potentially be preallocated - nolintlint # reports ill-formed or insufficient nolint directives - staticcheck # Staticcheck is a go vet on steroids, applying a ton of static analysis checks - stylecheck # a replacement for golint - - unparam # reports unused function parameters - unused # checks Go code for unused constants, variables, functions and types - - govet # Vet examines Go source code and reports suspicious constructs, such as Printf calls whose arguments do not align with the format string - ineffassign # detects when assignments to existing variables are not used - - structcheck # finds unused struct fields - typecheck # Like the front-end of a Go compiler, parses and type-checks Go code - - varcheck # Finds unused global variables and constants - asciicheck # simple linter to check that your code does not contain non-ASCII identifiers - bodyclose # checks whether HTTP response body is closed successfully - durationcheck # check for two durations multiplied together @@ -63,14 +54,20 @@ linters: - noctx # noctx finds sending http request without context.Context - unconvert # Remove unnecessary type conversions - wastedassign # wastedassign finds wasted assignment statements. - # - godox # tool for detection of FIXME, TODO and other comment keywords + - gomodguard # check for blocked dependencies # all available settings of specific linters linters-settings: errcheck: # report about not checking of errors in type assertions: `a := b.(MyStruct)`; - # default is false: such cases aren't reported by default. - check-type-assertions: true + check-type-assertions: false + # report about assignment of errors to blank identifier: `num, _ := strconv.Atoi(numStr)`. + check-blank: false + # List of functions to exclude from checking, where each entry is a single function to exclude. + # See https://github.com/kisielk/errcheck#excluding-functions for details. + exclude-functions: + - (mapstr.M).Delete # Only returns ErrKeyNotFound, can safely be ignored. + - (mapstr.M).Put # Can only fail on type conversions, usually safe to ignore. errorlint: # Check whether fmt.Errorf uses the %w verb for formatting errors. See the readme for caveats @@ -80,16 +77,6 @@ linters-settings: # Check for plain error comparisons comparison: true - goconst: - # minimal length of string constant, 3 by default - min-len: 3 - # minimal occurrences count to trigger, 3 by default - min-occurrences: 2 - - dupl: - # tokens count to trigger issue, 150 by default - threshold: 100 - forbidigo: # Forbid the following identifiers forbid: @@ -97,68 +84,59 @@ linters-settings: # Exclude godoc examples from forbidigo checks. Default is true. exclude_godoc_examples: true - gomoddirectives: - # Allow local `replace` directives. Default is false. - replace-local: false + goimports: + local-prefixes: github.com/elastic gomodguard: blocked: # List of blocked modules. modules: - - github.com/elastic/beats/v7: - reason: "There must be no Beats dependency, use elastic-agent-libs instead." - + # Blocked module. + - github.com/pkg/errors: + # Recommended modules that should be used instead. 
(Optional) + recommendations: + - errors + - fmt + reason: "This package is deprecated, use `fmt.Errorf` with `%w` instead" gosimple: # Select the Go version to target. The default is '1.13'. - go: "1.17" - - misspell: - # Correct spellings using locale preferences for US or UK. - # Default is to use a neutral variety of English. - # Setting locale to US will correct the British spelling of 'colour' to 'color'. - # locale: US - # ignore-words: - # - IdP + go: "1.18.7" nakedret: # make an issue if func has more lines of code than this setting and it has naked returns; default is 30 max-func-lines: 0 - prealloc: - # Report preallocation suggestions only on simple loops that have no returns/breaks/continues/gotos in them. - # True by default. - simple: true - range-loops: true # Report preallocation suggestions on range loops, true by default - for-loops: false # Report preallocation suggestions on for loops, false by default - nolintlint: # Enable to ensure that nolint directives are all used. Default is true. allow-unused: false # Disable to ensure that nolint directives don't have a leading space. Default is true. - allow-leading-space: true + allow-leading-space: false # Exclude following linters from requiring an explanation. Default is []. allow-no-explanation: [] # Enable to require an explanation of nonzero length after each nolint directive. Default is false. require-explanation: true # Enable to require nolint directives to mention the specific linter being suppressed. Default is false. - require-specific: true + require-specific: false staticcheck: # Select the Go version to target. The default is '1.13'. - go: "1.17" + go: "1.18.7" + checks: ["all"] stylecheck: # Select the Go version to target. The default is '1.13'. - go: "1.17" - - unparam: - # Inspect exported functions, default is false. Set to true if no external program/library imports your code. - # XXX: if you enable this setting, unparam will report a lot of false-positives in text editors: - # if it's called for subdir of a project it can't find external interfaces. All text editor integrations - # with golangci-lint call it on a directory with the changed file. - check-exported: false + go: "1.18.7" + checks: ["all"] unused: # Select the Go version to target. The default is '1.13'. - go: "1.17" + go: "1.18.7" + + gosec: + excludes: + - G306 # Expect WriteFile permissions to be 0600 or less + - G404 # Use of weak random number generator + - G401 # Detect the usage of DES, RC4, MD5 or SHA1: Used in non-crypto contexts. + - G501 # Import blocklist: crypto/md5: Used in non-crypto contexts. + - G505 # Import blocklist: crypto/sha1: Used in non-crypto contexts. 
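For context, the error-handling style these settings push toward (wrapping with fmt.Errorf and %w rather than github.com/pkg/errors, and comparing with errors.Is) looks roughly like the following. This is a hypothetical, standalone sketch for illustration only, not code from this repository:

package main

import (
	"errors"
	"fmt"
	"os"
)

func load(path string) error {
	if _, err := os.Stat(path); err != nil {
		// Wrap with %w (not %v) so callers can still unwrap the cause;
		// errorlint flags fmt.Errorf calls that break the error chain.
		return fmt.Errorf("loading %s: %w", path, err)
	}
	return nil
}

func main() {
	err := load("/no/such/file")
	// Compare with errors.Is instead of == so wrapped errors still match.
	if errors.Is(err, os.ErrNotExist) {
		fmt.Println("file does not exist:", err)
	}
}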
From d2c780b019a812b944593444dc66320f1d0aa5e8 Mon Sep 17 00:00:00 2001 From: Andrew Cholakian Date: Mon, 17 Oct 2022 10:55:17 -0500 Subject: [PATCH 175/180] Elastic agent counterpart of https://github.com/elastic/beats/pull/33362 (#1528) Always use the stack_release label for npm i No changelog necessary since there are no user-visible changes This lets us ensure we've carefully reviewed and labeled the version of the @elastic/synthetics NPM library that's bundled in docker images --- ...4342-use-stack-version-npm-synthetics.yaml | 31 +++++++++++++++++++ .../docker/Dockerfile.elastic-agent.tmpl | 2 +- .../templates/docker/Dockerfile.tmpl | 2 +- 3 files changed, 33 insertions(+), 2 deletions(-) create mode 100644 changelog/fragments/1665784342-use-stack-version-npm-synthetics.yaml diff --git a/changelog/fragments/1665784342-use-stack-version-npm-synthetics.yaml b/changelog/fragments/1665784342-use-stack-version-npm-synthetics.yaml new file mode 100644 index 00000000000..a928c800d1e --- /dev/null +++ b/changelog/fragments/1665784342-use-stack-version-npm-synthetics.yaml @@ -0,0 +1,31 @@ +# Kind can be one of: +# - breaking-change: a change to previously-documented behavior +# - deprecation: functionality that is being removed in a later release +# - bug-fix: fixes a problem in a previous version +# - enhancement: extends functionality but does not break or fix existing behavior +# - feature: new functionality +# - known-issue: problems that we are aware of in a given version +# - security: impacts on the security of a product or a user’s deployment. +# - upgrade: important information for someone upgrading from a prior version +# - other: does not fit into any of the other categories +kind: feature + +# Change summary; a 80ish characters long description of the change. +summary: use-stack-version-npm-synthetics + +# Long description; in case the summary is not enough to describe the change +# this field accommodate a description without length limits. +description: Always npm i the stack_release version of @elastic/synthetics + +# Affected component; a word indicating the component this changeset affects. +component: synthetics-integration + +# PR number; optional; the PR number that added the changeset. +# If not present is automatically filled by the tooling finding the PR where this changelog fragment has been added. +# NOTE: the tooling supports backports, so it's able to fill the original PR number instead of the backport PR number. +# Please provide it if you are adding a fragment for a different PR. +pr: 1528 + +# Issue number; optional; the GitHub issue related to this changeset (either closes or is part of). +# If not present is automatically filled by the tooling with the issue linked to the PR number. 
+#issue: 1234
diff --git a/dev-tools/packaging/templates/docker/Dockerfile.elastic-agent.tmpl b/dev-tools/packaging/templates/docker/Dockerfile.elastic-agent.tmpl
index 02358d16d57..760d5e9949a 100644
--- a/dev-tools/packaging/templates/docker/Dockerfile.elastic-agent.tmpl
+++ b/dev-tools/packaging/templates/docker/Dockerfile.elastic-agent.tmpl
@@ -191,7 +191,7 @@ RUN cd {{$beatHome}}/.node \
 RUN chown -R {{ .user }} $NODE_PATH
 USER {{ .user }}
 
 # If this fails dump the NPM logs
-RUN npm i -g --loglevel verbose -f @elastic/synthetics || sh -c 'tail -n +1 /root/.npm/_logs/* && exit 1'
+RUN npm i -g --loglevel verbose -f @elastic/synthetics@stack_release || sh -c 'tail -n +1 /root/.npm/_logs/* && exit 1'
 RUN chmod ug+rwX -R $NODE_PATH
 USER root
diff --git a/dev-tools/packaging/templates/docker/Dockerfile.tmpl b/dev-tools/packaging/templates/docker/Dockerfile.tmpl
index 06cce5a13b0..d2edf7909cb 100644
--- a/dev-tools/packaging/templates/docker/Dockerfile.tmpl
+++ b/dev-tools/packaging/templates/docker/Dockerfile.tmpl
@@ -181,7 +181,7 @@ RUN cd /usr/share/heartbeat/.node \
     && mkdir -p node \
     && curl ${NODE_DOWNLOAD_URL} | tar -xJ --strip 1 -C node \
     && chmod ug+rwX -R $NODE_PATH \
-    && npm i -g -f @elastic/synthetics && chmod ug+rwX -R $NODE_PATH
+    && npm i -g -f @elastic/synthetics@stack_release && chmod ug+rwX -R $NODE_PATH
 {{- end }}
 
 {{- range $i, $port := .ExposePorts }}

From edc1e8582b4ed4d2ac2f1782dce59a8745909bdc Mon Sep 17 00:00:00 2001
From: apmmachine <58790750+apmmachine@users.noreply.github.com>
Date: Tue, 18 Oct 2022 01:37:20 -0400
Subject: [PATCH 176/180] [Automation] Update elastic stack version to 8.6.0-cae815eb for testing (#1545)

Co-authored-by: apmmachine
---
 testing/environments/snapshot.yml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/testing/environments/snapshot.yml b/testing/environments/snapshot.yml
index df54d740103..1c415537ad4 100644
--- a/testing/environments/snapshot.yml
+++ b/testing/environments/snapshot.yml
@@ -3,7 +3,7 @@ version: '2.3'
 services:
   elasticsearch:
-    image: docker.elastic.co/elasticsearch/elasticsearch:8.6.0-54a302f0-SNAPSHOT
+    image: docker.elastic.co/elasticsearch/elasticsearch:8.6.0-cae815eb-SNAPSHOT
     # When extend is used it merges healthcheck.tests, see:
     # https://github.com/docker/compose/issues/8962
     # healthcheck:
@@ -42,7 +42,7 @@ services:
     - ./docker/logstash/pki:/etc/pki:ro
 
   kibana:
-    image: docker.elastic.co/kibana/kibana:8.6.0-54a302f0-SNAPSHOT
+    image: docker.elastic.co/kibana/kibana:8.6.0-cae815eb-SNAPSHOT
    environment:
      - "ELASTICSEARCH_USERNAME=kibana_system_user"
      - "ELASTICSEARCH_PASSWORD=testing"

From 33a5f7e198cd759711f04758820d91e434df7b79 Mon Sep 17 00:00:00 2001
From: Michal Pristas
Date: Tue, 18 Oct 2022 14:00:02 +0200
Subject: [PATCH 177/180] Fix admin permission check on localized windows
 (#1552)

Fix admin permission check on localized windows (#1552)
---
 ...permission-check-on-localized-windows.yaml | 31 ++++++++++++
 .../agent/control/server/listener_windows.go  | 47 +++++++++++++++++--
 2 files changed, 73 insertions(+), 5 deletions(-)
 create mode 100644 changelog/fragments/1666088774-Fix-admin-permission-check-on-localized-windows.yaml

diff --git a/changelog/fragments/1666088774-Fix-admin-permission-check-on-localized-windows.yaml b/changelog/fragments/1666088774-Fix-admin-permission-check-on-localized-windows.yaml
new file mode 100644
index 00000000000..93d5999f1b0
--- /dev/null
+++ b/changelog/fragments/1666088774-Fix-admin-permission-check-on-localized-windows.yaml
@@ -0,0 +1,31 @@
+# Kind can be one of:
+# - breaking-change: a change to previously-documented behavior
+# - deprecation: functionality that is being removed in a later release
+# - bug-fix: fixes a problem in a previous version
+# - enhancement: extends functionality but does not break or fix existing behavior
+# - feature: new functionality
+# - known-issue: problems that we are aware of in a given version
+# - security: impacts on the security of a product or a user’s deployment.
+# - upgrade: important information for someone upgrading from a prior version
+# - other: does not fit into any of the other categories
+kind: bug-fix
+
+# Change summary; an 80ish-character-long description of the change.
+summary: Fix admin permission check on localized windows
+
+# Long description; in case the summary is not enough to describe the change
+# this field accommodates a description without length limits.
+#description:
+
+# Affected component; a word indicating the component this changeset affects.
+component:
+
+# PR number; optional; the PR number that added the changeset.
+# If not present, it is automatically filled by the tooling finding the PR where this changelog fragment has been added.
+# NOTE: the tooling supports backports, so it's able to fill the original PR number instead of the backport PR number.
+# Please provide it if you are adding a fragment for a different PR.
+pr: 1552
+
+# Issue number; optional; the GitHub issue related to this changeset (either closes or is part of).
+# If not present, it is automatically filled by the tooling with the issue linked to the PR number.
+issue: 857
diff --git a/internal/pkg/agent/control/server/listener_windows.go b/internal/pkg/agent/control/server/listener_windows.go
index 69d211502ea..73fd3b97d95 100644
--- a/internal/pkg/agent/control/server/listener_windows.go
+++ b/internal/pkg/agent/control/server/listener_windows.go
@@ -10,6 +10,7 @@ package server
 import (
     "net"
     "os/user"
+    "strings"
 
     "github.com/pkg/errors"
 
@@ -18,9 +19,14 @@ import (
     "github.com/elastic/elastic-agent/pkg/core/logger"
 )
 
+const (
+    NTAUTHORITY_SYSTEM   = "S-1-5-18"
+    ADMINISTRATORS_GROUP = "S-1-5-32-544"
+)
+
 // createListener creates a named pipe listener on Windows
-func createListener(_ *logger.Logger) (net.Listener, error) {
-    sd, err := securityDescriptor()
+func createListener(log *logger.Logger) (net.Listener, error) {
+    sd, err := securityDescriptor(log)
     if err != nil {
         return nil, err
     }
@@ -31,7 +37,7 @@ func cleanupListener(_ *logger.Logger) {
     // nothing to do on windows
 }
 
-func securityDescriptor() (string, error) {
+func securityDescriptor(log *logger.Logger) (string, error) {
     u, err := user.Current()
     if err != nil {
         return "", errors.Wrap(err, "failed to get current user")
@@ -42,11 +48,42 @@ func securityDescriptor(log *logger.Logger) (string, error) {
     // String definition: https://docs.microsoft.com/en-us/windows/win32/secauthz/ace-strings
     // Give generic read/write access to the specified user.
     descriptor := "D:P(A;;GA;;;" + u.Uid + ")"
-    if u.Username == "NT AUTHORITY\\SYSTEM" {
+
+    if isAdmin, err := isWindowsAdmin(u); err != nil {
+        // do not fail, agent would end up in a loop, continue with limited permissions
+        log.Warnf("failed to detect admin: %w", err)
+    } else if isAdmin {
         // running as SYSTEM, include Administrators group so Administrators can talk over
         // the named pipe to the running Elastic Agent system process
         // https://support.microsoft.com/en-us/help/243330/well-known-security-identifiers-in-windows-operating-systems
-        descriptor += "(A;;GA;;;S-1-5-32-544)" // Administrators group
+        descriptor += "(A;;GA;;;" + ADMINISTRATORS_GROUP + ")"
     }
     return descriptor, nil
 }
+
+func isWindowsAdmin(u *user.User) (bool, error) {
+    if u.Username == "NT AUTHORITY\\SYSTEM" {
+        return true, nil
+    }
+
+    if equalsSystemGroup(u.Uid) || equalsSystemGroup(u.Gid) {
+        return true, nil
+    }
+
+    groups, err := u.GroupIds()
+    if err != nil {
+        return false, errors.Wrap(err, "failed to get current user groups")
+    }
+
+    for _, groupSid := range groups {
+        if equalsSystemGroup(groupSid) {
+            return true, nil
+        }
+    }
+
+    return false, nil
+}
+
+func equalsSystemGroup(s string) bool {
+    return strings.EqualFold(s, NTAUTHORITY_SYSTEM) || strings.EqualFold(s, ADMINISTRATORS_GROUP)
+}

From 962df8bf0dbf5b955a86695771a1a8fafdfba950 Mon Sep 17 00:00:00 2001
From: Blake Rouse
Date: Tue, 18 Oct 2022 15:35:29 -0400
Subject: [PATCH 178/180] Fixes from merge of main.

---
 NOTICE.txt                                    |   4 +-
 go.mod                                        |   3 +-
 go.sum                                        |  14 +-
 internal/pkg/agent/application/application.go |   4 +-
 .../gateway/fleet/fleet_gateway.go            |  60 ++++---
 .../agent/application/paths/common_test.go    |   3 +-
 .../upgrade/artifact/config_test.go           |   3 +-
 .../upgrade/artifact/download/reloadable.go   |  14 ++
 .../artifact/download/snapshot/downloader.go  |   7 +-
 .../agent/application/upgrade/step_mark.go    |   4 +-
 .../pkg/agent/application/upgrade/upgrade.go  |   9 +-
 .../agent/migration/migrate_secret_test.go    |   5 +-
 internal/pkg/agent/vars/vars.go               |   5 +-
 .../pkg/composable/providers/agent/agent.go   |   2 +-
 .../composable/providers/kubernetes/hints.go  |   1 +
 .../core/monitoring/beats/sidecar_monitor.go  | 154 ------------------
 internal/pkg/core/status/handler.go           |  44 -----
 internal/pkg/fleetapi/checkin_cmd.go          |  10 +-
 internal/pkg/testutils/status_reporter.go     |  81 ---------
 magefile.go                                   |   2 -
 20 files changed, 96 insertions(+), 333 deletions(-)
 create mode 100644 internal/pkg/agent/application/upgrade/artifact/download/reloadable.go
 delete mode 100644 internal/pkg/core/monitoring/beats/sidecar_monitor.go
 delete mode 100644 internal/pkg/core/status/handler.go
 delete mode 100644 internal/pkg/testutils/status_reporter.go

diff --git a/NOTICE.txt b/NOTICE.txt
index 3c55a5e0295..cfc14a6d1b7 100644
--- a/NOTICE.txt
+++ b/NOTICE.txt
@@ -617,11 +617,11 @@ you may not use this file except in compliance with the Elastic License.
 --------------------------------------------------------------------------------
 Dependency : github.com/elastic/elastic-agent-autodiscover
-Version: v0.0.0-20220404145827-89887023c1ab
+Version: v0.2.1
 Licence type (autodetected): Apache-2.0
 --------------------------------------------------------------------------------
 
-Contents of probable licence file $GOMODCACHE/github.com/elastic/elastic-agent-autodiscover@v0.0.0-20220404145827-89887023c1ab/LICENSE:
+Contents of probable licence file $GOMODCACHE/github.com/elastic/elastic-agent-autodiscover@v0.2.1/LICENSE:
 
                                  Apache License
                            Version 2.0, January 2004
diff --git a/go.mod b/go.mod
index 0aa3bfaa8cd..b1a27fc1102 100644
--- a/go.mod
+++ b/go.mod
@@ -11,7 +11,7 @@ require (
     github.com/coreos/go-systemd/v22 v22.3.3-0.20220203105225-a9a7ef127534
     github.com/docker/go-units v0.4.0
     github.com/elastic/e2e-testing v1.99.2-0.20220117192005-d3365c99b9c4
-    github.com/elastic/elastic-agent-autodiscover v0.0.0-20220404145827-89887023c1ab
+    github.com/elastic/elastic-agent-autodiscover v0.2.1
     github.com/elastic/elastic-agent-client/v7 v7.0.0-20220804181728-b0328d2fe484
     github.com/elastic/elastic-agent-libs v0.2.6
     github.com/elastic/go-licenser v0.4.0
@@ -116,7 +116,6 @@ require (
     go.elastic.co/apm/v2 v2.0.0 // indirect
     go.elastic.co/fastjson v1.1.0 // indirect
     go.uber.org/atomic v1.9.0 // indirect
-    go.uber.org/goleak v1.1.12 // indirect
     go.uber.org/multierr v1.8.0 // indirect
     golang.org/x/mod v0.5.1 // indirect
     golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4 // indirect
diff --git a/go.sum b/go.sum
index bc17c5e307b..888328dff65 100644
--- a/go.sum
+++ b/go.sum
@@ -92,6 +92,7 @@ github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2
 github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00=
 github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600=
 github.com/Microsoft/hcsshim v0.8.21/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4=
+github.com/Microsoft/hcsshim v0.8.24/go.mod h1:4zegtUJth7lAvFyc6cH2gGQ5B3OFQim01nnU2M8jKDg=
 github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU=
 github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY=
 github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
@@ -227,6 +228,7 @@ github.com/containerd/cgroups v0.0.0-20200710171044-318312a37340/go.mod h1:s5q4S
 github.com/containerd/cgroups v0.0.0-20200824123100-0b889c03f102/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo=
 github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE=
 github.com/containerd/cgroups v1.0.1/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f25ZfBiPYRkU=
+github.com/containerd/cgroups v1.0.3/go.mod h1:/ofk34relqNjSGyqPrmEULrO4Sc8LJhvJmWbUCUKqj8=
 github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
 github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
 github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE=
@@ -240,6 +242,7 @@ github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMX
 github.com/containerd/containerd v1.4.0-beta.2.0.20200729163537-40b22ef07410/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
 github.com/containerd/containerd v1.4.1/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
 github.com/containerd/containerd v1.4.3/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.4.9/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
 github.com/containerd/containerd v1.5.0-beta.1/go.mod h1:5HfvG1V2FsKesEGQ17k5/T7V960Tmcumvqn8Mc+pCYQ=
 github.com/containerd/containerd v1.5.0-beta.3/go.mod h1:/wr9AVtEM7x9c+n0+stptlo/uBBoBORwEx6ardVcmKU=
 github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09ZvgqEq8EfBp/m3lcVZIvPHhI=
@@ -281,6 +284,7 @@ github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDG
 github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8=
 github.com/containerd/ttrpc v1.0.1/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y=
 github.com/containerd/ttrpc v1.0.2/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y=
+github.com/containerd/ttrpc v1.1.0/go.mod h1:XX4ZTnoOId4HklF4edwc4DcqskFZuvXB1Evzy5KFQpQ=
 github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc=
 github.com/containerd/typeurl v0.0.0-20190911142611-5eb25027c9fd/go.mod h1:GeKYzf2pQcqv7tJ0AoCuuhtnqhva5LNU3U+OyKxxJpk=
 github.com/containerd/typeurl v1.0.1/go.mod h1:TB1hUtrpaiO88KEK56ijojHS1+NeF0izUACaJW2mdXg=
@@ -376,11 +380,11 @@ github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:Htrtb
 github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
 github.com/elastic/e2e-testing v1.99.2-0.20220117192005-d3365c99b9c4 h1:uYT+Krd8dsvnhnLK9pe/JHZkYtXEGPfbV4Wt1JPPol0=
 github.com/elastic/e2e-testing v1.99.2-0.20220117192005-d3365c99b9c4/go.mod h1:UcNuf4pX/qDVNQr0zybm1NL2YoWik+jKBaINZqQCA40=
-github.com/elastic/elastic-agent-autodiscover v0.0.0-20220404145827-89887023c1ab h1:Jk6Mfk5BF8gtfE7X0bNCiDGBtwJVxRI79b4wLCAsP+A=
-github.com/elastic/elastic-agent-autodiscover v0.0.0-20220404145827-89887023c1ab/go.mod h1:Gg1fsQI+rVms9FJ2DefBSojfPIzgkV8xlyG8fPG0DE8=
+github.com/elastic/elastic-agent-autodiscover v0.2.1 h1:Nbeayh3vq2FNm6xaFo34mhUdOu0EVlpj53CqCsbU0E4=
+github.com/elastic/elastic-agent-autodiscover v0.2.1/go.mod h1:gPnzzfdYNdgznAb+iG9eyyXaQXBbAMHa+Y6Z8hXfcGY=
 github.com/elastic/elastic-agent-client/v7 v7.0.0-20220804181728-b0328d2fe484 h1:uJIMfLgCenJvxsVmEjBjYGxt0JddCgw2IxgoNfcIXOk=
 github.com/elastic/elastic-agent-client/v7 v7.0.0-20220804181728-b0328d2fe484/go.mod h1:fkvyUfFwyAG5OnMF0h+FV9sC0Xn9YLITwQpSuwungQs=
-github.com/elastic/elastic-agent-libs v0.0.0-20220303160015-5b4e674da3dd/go.mod h1://82M1l73IHx0wDbS2Tzkq6Fx9fkmytS1KgkIyzvNTM=
+github.com/elastic/elastic-agent-libs v0.2.5/go.mod h1:chO3rtcLyGlKi9S0iGVZhYCzDfdDsAQYBc+ui588AFE=
 github.com/elastic/elastic-agent-libs v0.2.6 h1:DpcUcCVYZ7lNtHLUlyT1u/GtGAh49wpL15DTH7+8O5o=
 github.com/elastic/elastic-agent-libs v0.2.6/go.mod h1:chO3rtcLyGlKi9S0iGVZhYCzDfdDsAQYBc+ui588AFE=
 github.com/elastic/elastic-package v0.32.1/go.mod h1:l1fEnF52XRBL6a5h6uAemtdViz2bjtjUtgdQcuRhEAY=
@@ -389,6 +393,7 @@ github.com/elastic/go-elasticsearch/v8 v8.0.0-20210317102009-a9d74cec0186/go.mod
 github.com/elastic/go-licenser v0.3.1/go.mod h1:D8eNQk70FOCVBl3smCGQt/lv7meBeQno2eI1S5apiHQ=
 github.com/elastic/go-licenser v0.4.0 h1:jLq6A5SilDS/Iz1ABRkO6BHy91B9jBora8FwGRsDqUI=
 github.com/elastic/go-licenser v0.4.0/go.mod h1:V56wHMpmdURfibNBggaSBfqgPxyT1Tldns1i87iTEvU=
+github.com/elastic/go-structform v0.0.9/go.mod h1:CZWf9aIRYY5SuKSmOhtXScE5uQiLZNqAFnwKR4OrIM4=
 github.com/elastic/go-sysinfo v1.1.1/go.mod h1:i1ZYdU10oLNfRzq4vq62BEwD2fH8KaWh6eh0ikPT9F0=
 github.com/elastic/go-sysinfo v1.7.1 h1:Wx4DSARcKLllpKT2TnFVdSUJOsybqMYCNQZq1/wO+s0=
 github.com/elastic/go-sysinfo v1.7.1/go.mod h1:i1ZYdU10oLNfRzq4vq62BEwD2fH8KaWh6eh0ikPT9F0=
@@ -941,7 +946,6 @@ github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8
 github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
 github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
 github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
-github.com/opencontainers/image-spec v1.0.2-0.20190823105129-775207bd45b6/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
 github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM=
 github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
 github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
@@ -1393,6 +1397,7 @@ golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qx
 golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
 golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
 golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20211020060615-d418f374d309/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
 golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
 golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
 golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4 h1:HVyaeDAYux4pnY+D/SiwmLOR36ewZ4iGQIIrtnuCjFA=
@@ -1551,6 +1556,7 @@ golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBc
 golang.org/x/sys v0.0.0-20211205182925-97ca703d548d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220422013727-9388b58f7150 h1:xHms4gcpe1YE7A3yIllJXP16CMAGuqwO2lX1mTyyRRc=
 golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
diff --git a/internal/pkg/agent/application/application.go b/internal/pkg/agent/application/application.go
index c5076535825..3acb0e1d6b8 100644
--- a/internal/pkg/agent/application/application.go
+++ b/internal/pkg/agent/application/application.go
@@ -73,6 +73,7 @@ func New(
     var configMgr coordinator.ConfigManager
     var managed *managedConfigManager
     var compModifiers []coordinator.ComponentsModifier
+    var composableManaged bool
 
     if configuration.IsStandalone(cfg.Fleet) {
         log.Info("Parsed configuration and determined agent is managed locally")
@@ -100,6 +101,7 @@ func New(
     } else {
         log.Info("Parsed configuration and determined agent is managed by Fleet")
 
+        composableManaged = true
         compModifiers = append(compModifiers, FleetServerComponentModifier(cfg.Fleet.Server))
         managed, err = newManagedConfigManager(log, agentInfo, cfg, store, runtime)
         if err != nil {
@@ -109,7 +111,7 @@ func New(
         }
     }
 
-    composable, err := composable.New(log, rawConfig)
+    composable, err := composable.New(log, rawConfig, composableManaged)
     if err != nil {
         return nil, errors.New(err, "failed to initialize composable controller")
     }
diff --git a/internal/pkg/agent/application/gateway/fleet/fleet_gateway.go b/internal/pkg/agent/application/gateway/fleet/fleet_gateway.go
index 0288152f726..09396cf49fc 100644
--- a/internal/pkg/agent/application/gateway/fleet/fleet_gateway.go
+++ b/internal/pkg/agent/application/gateway/fleet/fleet_gateway.go
@@ -63,17 +63,18 @@ type stateStore interface {
 }
 
 type fleetGateway struct {
-    log           *logger.Logger
-    client        client.Sender
-    scheduler     scheduler.Scheduler
-    settings      *fleetGatewaySettings
-    agentInfo     agentInfo
-    acker         acker.Acker
-    unauthCounter int
-    stateFetcher  coordinator.StateFetcher
-    stateStore    stateStore
-    errCh         chan error
-    actionCh      chan []fleetapi.Action
+    log                *logger.Logger
+    client             client.Sender
+    scheduler          scheduler.Scheduler
+    settings           *fleetGatewaySettings
+    agentInfo          agentInfo
+    acker              acker.Acker
+    unauthCounter      int
+    checkinFailCounter int
+    stateFetcher       coordinator.StateFetcher
+    stateStore         stateStore
+    errCh              chan error
+    actionCh           chan []fleetapi.Action
 }
 
 // New creates a new fleet gateway
@@ -180,13 +181,25 @@ func (f *fleetGateway) doExecute(ctx context.Context, bo backoff.Backoff) (*flee
     // this mean we are rebooting to change the log level or the system is shutting us down.
     for ctx.Err() == nil {
         f.log.Debugf("Checking started")
-        resp, err := f.execute(ctx)
+        resp, took, err := f.execute(ctx)
         if err != nil {
-            f.log.Errorf("Could not communicate with fleet-server Checking API will retry, error: %s", err)
+            f.checkinFailCounter++
+
+            // Report the first two failures at warn level as they may be recoverable with retries.
+            if f.checkinFailCounter <= 2 {
+                f.log.Warnw("Possible transient error during checkin with fleet-server, retrying",
+                    "error.message", err, "request_duration_ns", took, "failed_checkins", f.checkinFailCounter,
+                    "retry_after_ns", bo.NextWait())
+            } else {
+                f.log.Errorw("Cannot checkin in with fleet-server, retrying",
+                    "error.message", err, "request_duration_ns", took, "failed_checkins", f.checkinFailCounter,
+                    "retry_after_ns", bo.NextWait())
+            }
+
             if !bo.Wait() {
                 // Something bad has happened and we log it and we should update our current state.
                 err := errors.New(
-                    "execute retry loop was stopped",
+                    "checkin retry loop was stopped",
                     errors.TypeNetwork,
                     errors.M(errors.MetaKeyURI, f.client.URI()),
                 )
@@ -197,6 +210,13 @@ func (f *fleetGateway) doExecute(ctx context.Context, bo backoff.Backoff) (*flee
             }
             continue
         }
+
+        if f.checkinFailCounter > 0 {
+            // Log at same level as error logs above so subsequent successes are visible when log level is set to 'error'.
+            f.log.Errorf("Checkin request to fleet-server succeeded after %d failures", f.checkinFailCounter)
+        }
+
+        f.checkinFailCounter = 0
         // Request was successful, return the collected actions.
         return resp, nil
     }
@@ -273,7 +293,7 @@ func (f *fleetGateway) convertToCheckinComponents(components []runtime.Component
     return checkinComponents
 }
 
-func (f *fleetGateway) execute(ctx context.Context) (*fleetapi.CheckinResponse, error) {
+func (f *fleetGateway) execute(ctx context.Context) (*fleetapi.CheckinResponse, time.Duration, error) {
     ecsMeta, err := info.Metadata()
     if err != nil {
         f.log.Error(errors.New("failed to load metadata", err))
@@ -301,7 +321,7 @@ func (f *fleetGateway) execute(ctx context.Context) (*fleetapi.CheckinResponse,
         Components: components,
     }
 
-    resp, err := cmd.Execute(ctx, req)
+    resp, took, err := cmd.Execute(ctx, req)
     if isUnauth(err) {
         f.unauthCounter++
 
@@ -309,15 +329,15 @@ func (f *fleetGateway) execute(ctx context.Context) (*fleetapi.CheckinResponse,
         f.log.Warnf("retrieved an invalid api key error '%d' times. Starting to unenroll the elastic agent.", f.unauthCounter)
         return &fleetapi.CheckinResponse{
             Actions: []fleetapi.Action{&fleetapi.ActionUnenroll{ActionID: "", ActionType: "UNENROLL", IsDetected: true}},
-        }, nil
+        }, took, nil
     }
 
-    return nil, err
+    return nil, took, err
 }
 
     f.unauthCounter = 0
 
     if err != nil {
-        return nil, err
+        return nil, took, err
     }
 
     // Save the latest ackToken
@@ -329,7 +349,7 @@ func (f *fleetGateway) execute(ctx context.Context) (*fleetapi.CheckinResponse,
         }
     }
 
-    return resp, nil
+    return resp, took, nil
 }
 
 // shouldUnenroll checks if the max number of trying an invalid key is reached
diff --git a/internal/pkg/agent/application/paths/common_test.go b/internal/pkg/agent/application/paths/common_test.go
index a5d76b405be..27a9cf80ebd 100644
--- a/internal/pkg/agent/application/paths/common_test.go
+++ b/internal/pkg/agent/application/paths/common_test.go
@@ -10,8 +10,9 @@ import (
     "runtime"
     "testing"
 
-    "github.com/elastic/elastic-agent/internal/pkg/release"
     "github.com/google/go-cmp/cmp"
+
+    "github.com/elastic/elastic-agent/internal/pkg/release"
 )
 
 func validTestPath() string {
diff --git a/internal/pkg/agent/application/upgrade/artifact/config_test.go b/internal/pkg/agent/application/upgrade/artifact/config_test.go
index 3a9a694b757..803154e465f 100644
--- a/internal/pkg/agent/application/upgrade/artifact/config_test.go
+++ b/internal/pkg/agent/application/upgrade/artifact/config_test.go
@@ -8,9 +8,10 @@ import (
     "testing"
     "time"
 
+    "github.com/stretchr/testify/require"
+
     "github.com/elastic/elastic-agent/internal/pkg/config"
     "github.com/elastic/elastic-agent/pkg/core/logger"
-    "github.com/stretchr/testify/require"
 )
 
 func TestReload(t *testing.T) {
diff --git a/internal/pkg/agent/application/upgrade/artifact/download/reloadable.go b/internal/pkg/agent/application/upgrade/artifact/download/reloadable.go
new file mode 100644
index 00000000000..3b2239740c7
--- /dev/null
+++ b/internal/pkg/agent/application/upgrade/artifact/download/reloadable.go
@@ -0,0 +1,14 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package download
+
+import (
+    "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact"
+)
+
+// Reloader is an interface allowing to reload artifact config
+type Reloader interface {
+    Reload(*artifact.Config) error
+}
diff --git a/internal/pkg/agent/application/upgrade/artifact/download/snapshot/downloader.go b/internal/pkg/agent/application/upgrade/artifact/download/snapshot/downloader.go
index f5692208869..2a09c65e522 100644
--- a/internal/pkg/agent/application/upgrade/artifact/download/snapshot/downloader.go
+++ b/internal/pkg/agent/application/upgrade/artifact/download/snapshot/downloader.go
@@ -10,15 +10,12 @@ import (
     "fmt"
     "strings"
 
+    "github.com/elastic/elastic-agent-libs/transport/httpcommon"
+
     "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact"
     "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact/download"
     "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact/download/http"
-
-    "github.com/elastic/elastic-agent-libs/transport/httpcommon"
     "github.com/elastic/elastic-agent/internal/pkg/agent/errors"
-    "github.com/elastic/elastic-agent/internal/pkg/artifact"
-    "github.com/elastic/elastic-agent/internal/pkg/artifact/download"
-    "github.com/elastic/elastic-agent/internal/pkg/artifact/download/http"
     "github.com/elastic/elastic-agent/internal/pkg/release"
     "github.com/elastic/elastic-agent/pkg/core/logger"
 )
diff --git a/internal/pkg/agent/application/upgrade/step_mark.go b/internal/pkg/agent/application/upgrade/step_mark.go
index 80bfaab6c44..fa337e3907a 100644
--- a/internal/pkg/agent/application/upgrade/step_mark.go
+++ b/internal/pkg/agent/application/upgrade/step_mark.go
@@ -92,7 +92,7 @@ func newMarkerSerializer(m *UpdateMarker) *updateMarkerSerializer {
 }
 
 // markUpgrade marks update happened so we can handle grace period
-func (u *Upgrader) markUpgrade(_ context.Context, log *logger.Logger, hash string, action Action) error {
+func (u *Upgrader) markUpgrade(_ context.Context, log *logger.Logger, hash string, action *fleetapi.ActionUpgrade) error {
     prevVersion := release.Version()
     prevHash := release.Commit()
     if len(prevHash) > hashLen {
@@ -104,7 +104,7 @@ func (u *Upgrader) markUpgrade(_ context.Context, log *logger.Logger, hash strin
         UpdatedOn:   time.Now(),
         PrevVersion: prevVersion,
         PrevHash:    prevHash,
-        Action:      action.FleetAction(),
+        Action:      action,
     }
 
     markerBytes, err := yaml.Marshal(newMarkerSerializer(marker))
diff --git a/internal/pkg/agent/application/upgrade/upgrade.go b/internal/pkg/agent/application/upgrade/upgrade.go
index 5531de02a59..e4ef8c6066f 100644
--- a/internal/pkg/agent/application/upgrade/upgrade.go
+++ b/internal/pkg/agent/application/upgrade/upgrade.go
@@ -7,11 +7,12 @@ package upgrade
 import (
     "context"
     "fmt"
-    "io/ioutil"
     "os"
     "path/filepath"
     "strings"
 
+    "github.com/elastic/elastic-agent/internal/pkg/config"
+
     "github.com/otiai10/copy"
     "go.elastic.co/apm"
 
@@ -111,7 +112,7 @@ func (u *Upgrader) Upgradeable() bool {
 }
 
 // Upgrade upgrades running agent, function returns shutdown callback that must be called by reexec.
 func (u *Upgrader) Upgrade(ctx context.Context, version string, sourceURI string, action *fleetapi.ActionUpgrade) (_ reexec.ShutdownCallbackFn, err error) {
-    u.log.Infow("Upgrading agent", "version", a.Version(), "source_uri", a.SourceURI())
+    u.log.Infow("Upgrading agent", "version", version, "source_uri", sourceURI)
     span, ctx := apm.StartSpan(ctx, "upgrade", "app.internal")
     defer span.End()
@@ -131,7 +132,7 @@ func (u *Upgrader) Upgrade(ctx context.Context, version string, sourceURI string
         return nil, err
     }
 
-    newHash, err := u.unpack(ctx, version, archivePath)
+    newHash, err := u.unpack(version, archivePath)
     if err != nil {
         return nil, err
     }
@@ -155,7 +156,7 @@ func (u *Upgrader) Upgrade(ctx context.Context, version string, sourceURI string
         return nil, err
     }
 
-    if err := u.markUpgrade(ctx, newHash, action); err != nil {
+    if err := u.markUpgrade(ctx, u.log, newHash, action); err != nil {
         u.log.Errorw("Rolling back: marking upgrade failed", "error.message", err)
         rollbackInstall(ctx, u.log, newHash)
         return nil, err
diff --git a/internal/pkg/agent/migration/migrate_secret_test.go b/internal/pkg/agent/migration/migrate_secret_test.go
index 562549c6db8..c6dfeb1781c 100644
--- a/internal/pkg/agent/migration/migrate_secret_test.go
+++ b/internal/pkg/agent/migration/migrate_secret_test.go
@@ -17,13 +17,14 @@ import (
     "testing"
     "time"
 
+    "github.com/gofrs/uuid"
+    "github.com/google/go-cmp/cmp"
+
     "github.com/elastic/elastic-agent-libs/logp"
     "github.com/elastic/elastic-agent/internal/pkg/agent/application/paths"
     "github.com/elastic/elastic-agent/internal/pkg/agent/application/secret"
     "github.com/elastic/elastic-agent/internal/pkg/agent/storage"
     "github.com/elastic/elastic-agent/internal/pkg/agent/vault"
-    "github.com/gofrs/uuid"
-    "github.com/google/go-cmp/cmp"
 )
 
 func TestFindAgentSecretFromHomePath(t *testing.T) {
diff --git a/internal/pkg/agent/vars/vars.go b/internal/pkg/agent/vars/vars.go
index 7f0aff1c329..65c0ef2ae1f 100644
--- a/internal/pkg/agent/vars/vars.go
+++ b/internal/pkg/agent/vars/vars.go
@@ -10,18 +10,19 @@ import (
     "fmt"
     "time"
 
+    "golang.org/x/sync/errgroup"
+
     "github.com/elastic/elastic-agent/internal/pkg/agent/transpiler"
     "github.com/elastic/elastic-agent/internal/pkg/composable"
     "github.com/elastic/elastic-agent/internal/pkg/config"
     "github.com/elastic/elastic-agent/pkg/core/logger"
-    "golang.org/x/sync/errgroup"
 )
 
 func WaitForVariables(ctx context.Context, l *logger.Logger, cfg *config.Config, wait time.Duration) ([]*transpiler.Vars, error) {
     var cancel context.CancelFunc
     var vars []*transpiler.Vars
 
-    composable, err := composable.New(l, cfg)
+    composable, err := composable.New(l, cfg, false)
     if err != nil {
         return nil, fmt.Errorf("failed to create composable controller: %w", err)
     }
diff --git a/internal/pkg/composable/providers/agent/agent.go b/internal/pkg/composable/providers/agent/agent.go
index dc5dfc249d3..2fb5bb284e5 100644
--- a/internal/pkg/composable/providers/agent/agent.go
+++ b/internal/pkg/composable/providers/agent/agent.go
@@ -15,7 +15,7 @@ import (
 )
 
 func init() {
-    _ = composable.Providers.MustAddContextProvider("agent", ContextProviderBuilder)
+    composable.Providers.MustAddContextProvider("agent", ContextProviderBuilder)
 }
 
 type contextProvider struct{}
diff --git a/internal/pkg/composable/providers/kubernetes/hints.go b/internal/pkg/composable/providers/kubernetes/hints.go
index 5499d1408cb..98bde12f54d 100644
--- a/internal/pkg/composable/providers/kubernetes/hints.go
+++ b/internal/pkg/composable/providers/kubernetes/hints.go
@@ -10,6 +10,7 @@ import (
     "strings"
 
     "github.com/elastic/elastic-agent-autodiscover/utils"
+
     "github.com/elastic/elastic-agent-libs/logp"
     "github.com/elastic/elastic-agent-libs/mapstr"
 )
diff --git a/internal/pkg/core/monitoring/beats/sidecar_monitor.go b/internal/pkg/core/monitoring/beats/sidecar_monitor.go
deleted file mode 100644
index c5d45c1c82d..00000000000
--- a/internal/pkg/core/monitoring/beats/sidecar_monitor.go
+++ /dev/null
@@ -1,154 +0,0 @@
-// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
-// or more contributor license agreements. Licensed under the Elastic License;
-// you may not use this file except in compliance with the Elastic License.
-
-package beats
-
-import (
-    "fmt"
-    "os"
-
-    "github.com/elastic/elastic-agent/internal/pkg/agent/configuration"
-    "github.com/elastic/elastic-agent/internal/pkg/agent/errors"
-    "github.com/elastic/elastic-agent/internal/pkg/agent/program"
-    "github.com/elastic/elastic-agent/internal/pkg/artifact"
-    "github.com/elastic/elastic-agent/internal/pkg/config"
-    monitoringConfig "github.com/elastic/elastic-agent/internal/pkg/core/monitoring/config"
-)
-
-// SidecarMonitor provides information about the way how beat is monitored
-type SidecarMonitor struct {
-    operatingSystem string
-    config          *monitoringConfig.MonitoringConfig
-}
-
-// NewSidecarMonitor creates a beats sidecar monitor, functionality is restricted purely on exposing
-// http endpoint for diagnostics.
-func NewSidecarMonitor(downloadConfig *artifact.Config, monitoringCfg *monitoringConfig.MonitoringConfig) *SidecarMonitor {
-    if monitoringCfg == nil {
-        monitoringCfg = monitoringConfig.DefaultConfig()
-        monitoringCfg.Pprof = &monitoringConfig.PprofConfig{Enabled: false}
-        monitoringCfg.HTTP.Buffer = &monitoringConfig.BufferConfig{Enabled: false}
-    }
-
-    return &SidecarMonitor{
-        operatingSystem: downloadConfig.OS(),
-        config:          monitoringCfg,
-    }
-}
-
-// Reload reloads state of the monitoring based on config.
-func (b *SidecarMonitor) Reload(rawConfig *config.Config) error {
-    cfg := configuration.DefaultConfiguration()
-    if err := rawConfig.Unpack(&cfg); err != nil {
-        return err
-    }
-
-    if cfg == nil || cfg.Settings == nil || cfg.Settings.MonitoringConfig == nil {
-        b.config = monitoringConfig.DefaultConfig()
-    } else {
-        if cfg.Settings.MonitoringConfig.Pprof == nil {
-            cfg.Settings.MonitoringConfig.Pprof = b.config.Pprof
-        }
-        if cfg.Settings.MonitoringConfig.HTTP.Buffer == nil {
-            cfg.Settings.MonitoringConfig.HTTP.Buffer = b.config.HTTP.Buffer
-        }
-        b.config = cfg.Settings.MonitoringConfig
-    }
-
-    return nil
-}
-
-// EnrichArgs enriches arguments provided to application, in order to enable
-// monitoring
-func (b *SidecarMonitor) EnrichArgs(spec program.Spec, pipelineID string, args []string) []string {
-    appendix := make([]string, 0, 7)
-
-    if endpoint := MonitoringEndpoint(spec, b.operatingSystem, pipelineID, true); endpoint != "" {
-        appendix = append(appendix,
-            "-E", "http.enabled=true",
-            "-E", "http.host="+endpoint,
-        )
-        if b.config.Pprof != nil && b.config.Pprof.Enabled {
-            appendix = append(appendix,
-                "-E", "http.pprof.enabled=true",
-            )
-        }
-        if b.config.HTTP.Buffer != nil && b.config.HTTP.Buffer.Enabled {
-            appendix = append(appendix,
-                "-E", "http.buffer.enabled=true",
-            )
-        }
-    }
-
-    return append(args, appendix...)
-}
-
-// Cleanup cleans up all drops.
-func (b *SidecarMonitor) Cleanup(spec program.Spec, pipelineID string) error {
-    endpoint := MonitoringEndpoint(spec, b.operatingSystem, pipelineID, true)
-    drop := monitoringDrop(endpoint)
-    if drop == "" {
-        // not exposed using sockets
-        return nil
-    }
-
-    return os.RemoveAll(drop)
-}
-
-// Close disables monitoring
-func (b *SidecarMonitor) Close() {
-    b.config.Enabled = false
-    b.config.MonitorMetrics = false
-    b.config.MonitorLogs = false
-}
-
-// Prepare executes steps in order for monitoring to work correctly
-func (b *SidecarMonitor) Prepare(spec program.Spec, pipelineID string, uid, gid int) error {
-    endpoint := MonitoringEndpoint(spec, b.operatingSystem, pipelineID, true)
-    drop := monitoringDrop(endpoint)
-
-    if drop == "" {
-        // not exposed using sockets
-        return nil
-    }
-
-    if err := os.MkdirAll(drop, 0775); err != nil {
-        return errors.New(err, fmt.Sprintf("failed to create a directory %q", drop))
-    }
-
-    if err := changeOwner(drop, uid, gid); err != nil {
-        return errors.New(err, fmt.Sprintf("failed to change owner of a directory %q", drop))
-    }
-
-    return nil
-}
-
-// LogPath describes a path where application stores logs. Empty if
-// application is not monitorable
-func (b *SidecarMonitor) LogPath(program.Spec, string) string {
-    return ""
-}
-
-// MetricsPath describes a location where application exposes metrics
-// collectable by metricbeat.
-func (b *SidecarMonitor) MetricsPath(program.Spec, string) string {
-    return ""
-}
-
-// MetricsPathPrefixed return metrics path prefixed with http+ prefix.
-func (b *SidecarMonitor) MetricsPathPrefixed(program.Spec, string) string {
-    return ""
-}
-
-// IsMonitoringEnabled returns true if monitoring is configured.
-func (b *SidecarMonitor) IsMonitoringEnabled() bool { return false }
-
-// WatchLogs return true if monitoring is configured and monitoring logs is enabled.
-func (b *SidecarMonitor) WatchLogs() bool { return false }
-
-// WatchMetrics return true if monitoring is configured and monitoring metrics is enabled.
-func (b *SidecarMonitor) WatchMetrics() bool { return false }
-
-// MonitoringNamespace returns monitoring namespace configured.
-func (b *SidecarMonitor) MonitoringNamespace() string { return "default" }
diff --git a/internal/pkg/core/status/handler.go b/internal/pkg/core/status/handler.go
deleted file mode 100644
index 1fa72a10f93..00000000000
--- a/internal/pkg/core/status/handler.go
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
-// or more contributor license agreements. Licensed under the Elastic License;
-// you may not use this file except in compliance with the Elastic License.
-
-package status
-
-import (
-    "encoding/json"
-    "net/http"
-    "time"
-)
-
-// LivenessResponse is the response body for the liveness endpoint.
-type LivenessResponse struct {
-    ID         string    `json:"id"`
-    Status     string    `json:"status"`
-    Message    string    `json:"message"`
-    UpdateTime time.Time `json:"update_timestamp"`
-}
-
-// ServeHTTP is an HTTP Handler for the status controller.
-// It uses the local agent status so it is able to report a degraded state if the fleet-server checkin has issues.
-// Respose code is 200 for a healthy agent, and 503 otherwise.
-// Response body is a JSON object that contains the agent ID, status, message, and the last status update time.
-func (r *controller) ServeHTTP(wr http.ResponseWriter, req *http.Request) {
-    s := r.LocalStatus()
-    lr := LivenessResponse{
-        ID:         r.agentID,
-        Status:     s.Status.String(),
-        Message:    s.Message,
-        UpdateTime: s.UpdateTime,
-    }
-    status := http.StatusOK
-    if s.Status != Healthy {
-        status = http.StatusServiceUnavailable
-    }
-
-    wr.Header().Set("Content-Type", "application/json")
-    wr.WriteHeader(status)
-    enc := json.NewEncoder(wr)
-    if err := enc.Encode(lr); err != nil {
-        r.log.Errorf("Unable to encode liveness response: %v", err)
-    }
-}
diff --git a/internal/pkg/fleetapi/checkin_cmd.go b/internal/pkg/fleetapi/checkin_cmd.go
index 9bd57196f0b..33bcd3dab55 100644
--- a/internal/pkg/fleetapi/checkin_cmd.go
+++ b/internal/pkg/fleetapi/checkin_cmd.go
@@ -38,11 +38,11 @@ type CheckinComponent struct {
 
 // CheckinRequest consists of multiple events reported to fleet ui.
 type CheckinRequest struct {
-    Status   string        `json:"status"`
-    AckToken string        `json:"ack_token,omitempty"`
-    Metadata *info.ECSMeta `json:"local_metadata,omitempty"`
-    Message    string             `json:"message"`    // V2 Agent message
-    Components []CheckinComponent `json:"components"` // V2 Agent components
+    Status     string             `json:"status"`
+    AckToken   string             `json:"ack_token,omitempty"`
+    Metadata   *info.ECSMeta      `json:"local_metadata,omitempty"`
+    Message    string             `json:"message"`    // V2 Agent message
+    Components []CheckinComponent `json:"components"` // V2 Agent components
 }
 
 // SerializableEvent is a representation of the event to be send to the Fleet Server API via the checkin
diff --git a/internal/pkg/testutils/status_reporter.go b/internal/pkg/testutils/status_reporter.go
deleted file mode 100644
index 1d4fded4c0a..00000000000
--- a/internal/pkg/testutils/status_reporter.go
+++ /dev/null
@@ -1,81 +0,0 @@
-// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
-// or more contributor license agreements. Licensed under the Elastic License;
-// you may not use this file except in compliance with the Elastic License.
-
-package testutils
-
-import (
-    "net/http"
-
-    "github.com/elastic/elastic-agent/internal/pkg/core/state"
-    "github.com/elastic/elastic-agent/internal/pkg/core/status"
-    "github.com/stretchr/testify/mock"
-)
-
-type MockController struct {
-    mock.Mock
-}
-
-func (m *MockController) SetAgentID(id string) {
-    m.Called(id)
-}
-
-func (m *MockController) RegisterComponent(id string) status.Reporter {
-    args := m.Called(id)
-    return args.Get(0).(status.Reporter)
-}
-
-func (m *MockController) RegisterLocalComponent(id string) status.Reporter {
-    args := m.Called(id)
-    return args.Get(0).(status.Reporter)
-}
-
-func (m *MockController) RegisterComponentWithPersistance(id string, b bool) status.Reporter {
-    args := m.Called(id, b)
-    return args.Get(0).(status.Reporter)
-}
-
-func (m *MockController) RegisterApp(id, name string) status.Reporter {
-    args := m.Called(id, name)
-    return args.Get(0).(status.Reporter)
-}
-
-func (m *MockController) Status() status.AgentStatus {
-    args := m.Called()
-    return args.Get(0).(status.AgentStatus)
-}
-
-func (m *MockController) LocalStatus() status.AgentStatus {
-    args := m.Called()
-    return args.Get(0).(status.AgentStatus)
-}
-
-func (m *MockController) StatusCode() status.AgentStatusCode {
-    args := m.Called()
-    return args.Get(0).(status.AgentStatusCode)
-}
-
-func (m *MockController) StatusString() string {
-    args := m.Called()
-    return args.String(0)
-}
-
-func (m *MockController) UpdateStateID(id string) {
-    m.Called(id)
-}
-
-func (m *MockController) ServeHTTP(wr http.ResponseWriter, req *http.Request) {
-    m.Called(wr, req)
-}
-
-type MockReporter struct {
-    mock.Mock
-}
-
-func (m *MockReporter) Update(state state.Status, message string, meta map[string]interface{}) {
-    m.Called(state, message, meta)
-}
-
-func (m *MockReporter) Unregister() {
-    m.Called()
-}
diff --git a/magefile.go b/magefile.go
index 8a35bf10ce7..ed633505e49 100644
--- a/magefile.go
+++ b/magefile.go
@@ -10,14 +10,12 @@ package main
 import (
     "context"
     "fmt"
-    "io"
     "os"
     "os/exec"
     "path/filepath"
     "runtime"
     "strconv"
     "strings"
-    "sync"
     "time"
 
     "github.com/hashicorp/go-multierror"

From 2f934f1f7abde5e20b8168f76612ae9ef6475f7d Mon Sep 17 00:00:00 2001
From: Blake Rouse
Date: Wed, 19 Oct 2022 20:58:26 -0400
Subject: [PATCH 179/180] Update heartbeat specification to only support
 elasticsearch.
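
The only functional change is to the shared outputs anchor: the kafka,
logstash, and redis entries are dropped, leaving elasticsearch as the
sole supported output for synthetics/synthetics and, through the
*outputs alias, for the http, icmp, and tcp monitors as well. The
resulting anchor, verbatim from the new spec below:

    outputs: &outputs
      - elasticsearch

Everything else in the diff is the file being rewritten line for line
with otherwise identical content (hence 44 insertions against 47
deletions); the inputs and their command args are unchanged.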
---
 specs/heartbeat.spec.yml | 91 +++++++++++++++++++----------------
 1 file changed, 44 insertions(+), 47 deletions(-)

diff --git a/specs/heartbeat.spec.yml b/specs/heartbeat.spec.yml
index 0b7da1c9048..ba6a08934b8 100644
--- a/specs/heartbeat.spec.yml
+++ b/specs/heartbeat.spec.yml
@@ -1,47 +1,44 @@
-version: 2
-inputs:
-  - name: synthetics/synthetics
-    description: "Synthetics Browser Monitor"
-    platforms: &platforms
-      - linux/amd64
-      - linux/arm64
-      - darwin/amd64
-      - darwin/arm64
-      - windows/amd64
-      - container/amd64
-      - container/arm64
-    outputs: &outputs
-      - elasticsearch
-      - kafka
-      - logstash
-      - redis
-    command:
-      args: &args
-        - "-E"
-        - "setup.ilm.enabled=false"
-        - "-E"
-        - "setup.template.enabled=false"
-        - "-E"
-        - "management.enabled=true"
-        - "-E"
-        - "logging.level=debug"
-        - "-E"
-        - "gc_percent=${HEARTBEAT_GOGC:100}"
-  - name: synthetics/http
-    description: "Synthetics HTTP Monitor"
-    platforms: *platforms
-    outputs: *outputs
-    command:
-      args: *args
-  - name: synthetics/icmp
-    description: "Synthetics ICMP Monitor"
-    platforms: *platforms
-    outputs: *outputs
-    command:
-      args: *args
-  - name: synthetics/tcp
-    description: "Synthetics TCP Monitor"
-    platforms: *platforms
-    outputs: *outputs
-    command:
-      args: *args
+version: 2
+inputs:
+  - name: synthetics/synthetics
+    description: "Synthetics Browser Monitor"
+    platforms: &platforms
+      - linux/amd64
+      - linux/arm64
+      - darwin/amd64
+      - darwin/arm64
+      - windows/amd64
+      - container/amd64
+      - container/arm64
+    outputs: &outputs
+      - elasticsearch
+    command:
+      args: &args
+        - "-E"
+        - "setup.ilm.enabled=false"
+        - "-E"
+        - "setup.template.enabled=false"
+        - "-E"
+        - "management.enabled=true"
+        - "-E"
+        - "logging.level=debug"
+        - "-E"
+        - "gc_percent=${HEARTBEAT_GOGC:100}"
+  - name: synthetics/http
+    description: "Synthetics HTTP Monitor"
+    platforms: *platforms
+    outputs: *outputs
+    command:
+      args: *args
+  - name: synthetics/icmp
+    description: "Synthetics ICMP Monitor"
+    platforms: *platforms
+    outputs: *outputs
+    command:
+      args: *args
+  - name: synthetics/tcp
+    description: "Synthetics TCP Monitor"
+    platforms: *platforms
+    outputs: *outputs
+    command:
+      args: *args

From 8b4f1716a7bc6ff163f1bad1e3627641be511102 Mon Sep 17 00:00:00 2001
From: Blake Rouse
Date: Wed, 19 Oct 2022 21:04:42 -0400
Subject: [PATCH 180/180] Fix bad merge in dockerfile.
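
The bad merge had restored the legacy "Unpack beats to default install
directory" RUN block in Dockerfile.elastic-agent.tmpl, which unpacks
per-beat tarballs from downloads/ and setcaps the heartbeat binary;
that block appears to be a leftover from the pre-V2 image layout, so it
is deleted again below rather than repaired.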
---
 .../templates/docker/Dockerfile.elastic-agent.tmpl | 11 -----------
 1 file changed, 11 deletions(-)

diff --git a/dev-tools/packaging/templates/docker/Dockerfile.elastic-agent.tmpl b/dev-tools/packaging/templates/docker/Dockerfile.elastic-agent.tmpl
index 0c09c99a5aa..ab16391a611 100644
--- a/dev-tools/packaging/templates/docker/Dockerfile.elastic-agent.tmpl
+++ b/dev-tools/packaging/templates/docker/Dockerfile.elastic-agent.tmpl
@@ -147,17 +147,6 @@ RUN chown {{ .user }} /app
 {{- end }}
 {{- end }}
 
-# Unpack beats to default install directory
-RUN mkdir -p {{ $beatHome }}/data/{{.BeatName}}-{{ commit_short }}/{{ .beats_install_path }} && \
-    for beatPath in {{ $beatHome }}/data/{{.BeatName}}-{{ commit_short }}/downloads/*.tar.gz; do \
-      tar xf $beatPath -C {{ $beatHome }}/data/{{.BeatName}}-{{ commit_short }}/{{ .beats_install_path }}; \
-    done && \
-    chown -R {{ .user }}:{{ .user }} {{ $beatHome }}/data/{{.BeatName}}-{{ commit_short }}/{{ .beats_install_path }} && \
-    chown -R root:root {{ $beatHome }}/data/{{.BeatName}}-{{ commit_short }}/{{ .beats_install_path }}/*/*.yml && \
-    chmod 0644 {{ $beatHome }}/data/{{.BeatName}}-{{ commit_short }}/{{ .beats_install_path }}/*/*.yml && \
-    # heartbeat requires cap_net_raw,cap_setuid to run ICMP checks and change npm user
-    setcap cap_net_raw,cap_setuid+p {{ $beatHome }}/data/{{.BeatName}}-{{ commit_short }}/{{ .beats_install_path }}/heartbeat-*/heartbeat
-
 {{- if (and (contains .image_name "-complete") (not (contains .from "ubi-minimal"))) }}
 USER root
 ENV NODE_PATH={{ $beatHome }}/.node