diff --git a/.ci/end2end.groovy b/.ci/end2end.groovy index 8e9b041d32d3e..0cd64dcfd41fd 100644 --- a/.ci/end2end.groovy +++ b/.ci/end2end.groovy @@ -12,8 +12,7 @@ pipeline { environment { BASE_DIR = 'src/github.com/elastic/kibana' HOME = "${env.WORKSPACE}" - APM_ITS = 'apm-integration-testing' - CYPRESS_DIR = 'x-pack/plugins/apm/e2e' + E2E_DIR = 'x-pack/plugins/apm/e2e' PIPELINE_LOG_LEVEL = 'DEBUG' } options { @@ -43,32 +42,6 @@ pipeline { env.APM_UPDATED = isGitRegionMatch(patterns: regexps) } } - dir("${APM_ITS}"){ - git changelog: false, - credentialsId: 'f6c7695a-671e-4f4f-a331-acdce44ff9ba', - poll: false, - url: "git@github.com:elastic/${APM_ITS}.git" - } - } - } - stage('Start services') { - options { skipDefaultCheckout() } - when { - anyOf { - expression { return params.FORCE } - expression { return env.APM_UPDATED != "false" } - } - } - steps { - notifyStatus('Starting services', 'PENDING') - dir("${APM_ITS}"){ - sh './scripts/compose.py start master --no-kibana' - } - } - post { - unsuccessful { - notifyStatus('Environmental issue', 'FAILURE') - } } } stage('Prepare Kibana') { @@ -85,7 +58,7 @@ pipeline { steps { notifyStatus('Preparing kibana', 'PENDING') dir("${BASE_DIR}"){ - sh script: "${CYPRESS_DIR}/ci/prepare-kibana.sh" + sh "${E2E_DIR}/ci/prepare-kibana.sh" } } post { @@ -105,24 +78,20 @@ pipeline { steps{ notifyStatus('Running smoke tests', 'PENDING') dir("${BASE_DIR}"){ - sh ''' - jobs -l - docker build --tag cypress --build-arg NODE_VERSION=$(cat .node-version) ${CYPRESS_DIR}/ci - docker run --rm -t --user "$(id -u):$(id -g)" \ - -v `pwd`:/app --network="host" \ - --name cypress cypress''' + sh "${E2E_DIR}/ci/run-e2e.sh" } } post { always { - dir("${BASE_DIR}"){ - archiveArtifacts(allowEmptyArchive: false, artifacts: "${CYPRESS_DIR}/**/screenshots/**,${CYPRESS_DIR}/**/videos/**,${CYPRESS_DIR}/**/test-results/*e2e-tests.xml") - junit(allowEmptyResults: true, testResults: "${CYPRESS_DIR}/**/test-results/*e2e-tests.xml") - } - dir("${APM_ITS}"){ - sh 'docker-compose logs > apm-its.log || true' - sh 'docker-compose down -v || true' - archiveArtifacts(allowEmptyArchive: false, artifacts: 'apm-its.log') + dir("${BASE_DIR}/${E2E_DIR}"){ + archiveArtifacts(allowEmptyArchive: false, artifacts: 'cypress/screenshots/**,cypress/videos/**,cypress/test-results/*e2e-tests.xml') + junit(allowEmptyResults: true, testResults: 'cypress/test-results/*e2e-tests.xml') + dir('tmp/apm-integration-testing'){ + sh 'docker-compose logs > apm-its-docker.log || true' + sh 'docker-compose down -v || true' + archiveArtifacts(allowEmptyArchive: true, artifacts: 'apm-its-docker.log') + } + archiveArtifacts(allowEmptyArchive: true, artifacts: 'tmp/*.log') } } unsuccessful { @@ -137,7 +106,7 @@ pipeline { post { always { dir("${BASE_DIR}"){ - archiveArtifacts(allowEmptyArchive: true, artifacts: "${CYPRESS_DIR}/ingest-data.log,kibana.log") + archiveArtifacts(allowEmptyArchive: true, artifacts: "${E2E_DIR}/kibana.log") } } } diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index a160094a54130..6df4136ef74af 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -142,6 +142,7 @@ /config/kibana.yml @elastic/kibana-platform /x-pack/plugins/features/ @elastic/kibana-platform /x-pack/plugins/licensing/ @elastic/kibana-platform +/x-pack/plugins/cloud/ @elastic/kibana-platform /packages/kbn-config-schema/ @elastic/kibana-platform /src/legacy/server/config/ @elastic/kibana-platform /src/legacy/server/http/ @elastic/kibana-platform diff --git 
a/docs/management/ingest-pipelines/images/ingest-pipeline-list.png b/docs/management/ingest-pipelines/images/ingest-pipeline-list.png new file mode 100755 index 0000000000000..5080b4e0bd477 Binary files /dev/null and b/docs/management/ingest-pipelines/images/ingest-pipeline-list.png differ diff --git a/docs/management/ingest-pipelines/images/ingest-pipeline-privileges.png b/docs/management/ingest-pipelines/images/ingest-pipeline-privileges.png new file mode 100755 index 0000000000000..ad9451e02e2ea Binary files /dev/null and b/docs/management/ingest-pipelines/images/ingest-pipeline-privileges.png differ diff --git a/docs/management/ingest-pipelines/images/ingest-pipeline-processor.png b/docs/management/ingest-pipelines/images/ingest-pipeline-processor.png new file mode 100755 index 0000000000000..8d8b8aa4b42e3 Binary files /dev/null and b/docs/management/ingest-pipelines/images/ingest-pipeline-processor.png differ diff --git a/docs/management/ingest-pipelines/ingest-pipelines.asciidoc b/docs/management/ingest-pipelines/ingest-pipelines.asciidoc new file mode 100644 index 0000000000000..8c259dae256d4 --- /dev/null +++ b/docs/management/ingest-pipelines/ingest-pipelines.asciidoc @@ -0,0 +1,144 @@ +[role="xpack"] +[[ingest-node-pipelines]] +== Ingest Node Pipelines + +*Ingest Node Pipelines* enables you to create and manage {es} +pipelines that perform common transformations and +enrichments on your data. For example, you might remove a field, +rename an existing field, or set a new field. + +You’ll find *Ingest Node Pipelines* in *Management > Elasticsearch*. With this feature, you can: + +* View a list of your pipelines and drill down into details. +* Create a pipeline that defines a series of tasks, known as processors. +* Test a pipeline before feeding it real data to ensure it works as expected. +* Delete a pipeline that is no longer needed. + +[role="screenshot"] +image:management/ingest-pipelines/images/ingest-pipeline-list.png["Ingest node pipeline list"] + +[float] +=== Required permissions + +The minimum required permissions to access *Ingest Node Pipelines* are +the `manage_pipeline` and `cluster:monitor/nodes/info` cluster privileges. + +You can add these privileges in *Management > Security > Roles*. + +[role="screenshot"] +image:management/ingest-pipelines/images/ingest-pipeline-privileges.png["Privileges required for Ingest Node Pipelines"] + +[float] +[[ingest-node-pipelines-manage]] +=== Manage pipelines + +From the list view, you can drill down into the details of a pipeline. +To +edit, clone, or delete a pipeline, use the *Actions* menu. + +If you don’t have any pipelines, you can create one using the +*Create pipeline* form. You’ll define processors to transform documents +in a specific way. To handle exceptions, you can optionally define +failure processors to execute immediately after a failed processor. +Before creating the pipeline, you can verify it provides the expected output. + +[float] +[[ingest-node-pipelines-example]] +==== Example: Create a pipeline + +In this example, you’ll create a pipeline to handle server logs in the +Common Log Format. The log looks similar to this: + +[source,js] +---------------------------------- +212.87.37.154 - - [05/May/2020:16:21:15 +0000] \"GET /favicon.ico HTTP/1.1\" +200 3638 \"-\" \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) +AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36\" +---------------------------------- + +The log contains an IP address, timestamp, and user agent. 
You want to give +these three items their own field in {es} for fast search and visualization. +You also want to know where the request is coming from. + +. In *Ingest Node Pipelines*, click *Create a pipeline*. +. Provide a name and description for the pipeline. +. Define the processors: ++ +[source,js] +---------------------------------- +[ + { + "grok": { + "field": "message", + "patterns": ["%{IPORHOST:clientip} %{USER:ident} %{USER:auth} \\[%{HTTPDATE:timestamp}\\] \"%{WORD:verb} %{DATA:request} HTTP/%{NUMBER:httpversion}\" %{NUMBER:response:int} (?:-|%{NUMBER:bytes:int}) %{QS:referrer} %{QS:agent}"] + } + }, + { + "date": { + "field": "timestamp", + "formats": [ "dd/MMM/YYYY:HH:mm:ss Z" ] + } + }, + { + "geoip": { + "field": "clientip" + } + }, + { + "user_agent": { + "field": "agent" + } + } + ] +---------------------------------- ++ +This code defines four {ref}/ingest-processors.html[processors] that run sequentially: +{ref}/grok-processor.html[grok], {ref}/date-processor.html[date], +{ref}/geoip-processor.html[geoip], and {ref}/user-agent-processor.html[user_agent]. +Your form should look similar to this: ++ +[role="screenshot"] +image:management/ingest-pipelines/images/ingest-pipeline-processor.png["Processors for Ingest Node Pipelines"] + +. To verify that the pipeline gives the expected outcome, click *Test pipeline*. + +. In the *Document* tab, provide the following sample document for testing: ++ +[source,js] +---------------------------------- +[ + { + "_source": { + "message": "212.87.37.154 - - [05/May/2020:16:21:15 +0000] \"GET /favicon.ico HTTP/1.1\" 200 3638 \"-\" \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36\"" + } + } +] +---------------------------------- + +. Click *Run the pipeline* and check if the pipeline worked as expected. ++ +You can also +view the verbose output and refresh the output from this view. + +. If everything looks correct, close the panel, and then click *Create pipeline*. ++ +At this point, you’re ready to use the Elasticsearch index API to load +the log data. + +. In the Kibana Console, index a document with the pipeline +you created. ++ +[source,js] +---------------------------------- +PUT my-index/_doc/1?pipeline=access_logs +{ + "message": "212.87.37.154 - - [05/May/2020:16:21:15 +0000] \"GET /favicon.ico HTTP/1.1\" 200 3638 \"-\" \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36\"" +} +---------------------------------- + +. To verify, run: ++ +[source,js] +---------------------------------- +GET my-index/_doc/1 +---------------------------------- diff --git a/docs/uptime/alerting.asciidoc b/docs/uptime/alerting.asciidoc new file mode 100644 index 0000000000000..24f7628e960f9 --- /dev/null +++ b/docs/uptime/alerting.asciidoc @@ -0,0 +1,30 @@ +[role="xpack"] +[[uptime-alerting]] + +== Uptime alerting + +The Uptime app integrates with Kibana's {kibana-ref}/alerting-getting-started.html[alerting and actions] +feature. It provides a set of built-in actions and Uptime-specific threshold alerts for you to use +and enables central management of all alerts from <>. + +[float] +=== Monitor status alerts + +To receive alerts when a monitor goes down, use the alerting menu at the top of the +overview page. Use a query in the alert flyout to determine which monitors to check +with your alert. If you already have a query in the overview page search bar, it will +be carried over into this box. 
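For example, assuming standard Heartbeat fields such as `monitor.type` and `url.port` (the values below are illustrative only), a filter like this would scope the alert to HTTP monitors checking port 443:

```
monitor.type: "http" and url.port: 443
```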
+ +[role="screenshot"] +image::uptime/images/monitor-status-alert-flyout.png[Create monitor status alert flyout] + +[float] +=== TLS alerts + +Uptime also provides the ability to create an alert that notifies you when one or +more of your monitors has a TLS certificate that will expire within a given threshold, +or whose age exceeds a limit. The values for these thresholds are configurable on +the <>. + +[role="screenshot"] +image::uptime/images/tls-alert-flyout.png[Create TLS alert flyout] diff --git a/docs/uptime/certificates.asciidoc b/docs/uptime/certificates.asciidoc new file mode 100644 index 0000000000000..cc604d7196648 --- /dev/null +++ b/docs/uptime/certificates.asciidoc @@ -0,0 +1,15 @@ +[role="xpack"] +[[uptime-certificates]] + +== Certificates + +[role="screenshot"] +image::uptime/images/certificates-page.png[Certificates] + +The Certificates page allows you to visualize TLS certificate data in your indices. In addition to the +common name, associated monitors, issuer information, and SHA fingerprints, Uptime also assigns a status +derived from the threshold values in the <>. + +Several of the columns on this page are sortable. You can use the search bar at the top of the view +to find values in most of the TLS-related fields in your Uptime indices. Additionally, you can +create a TLS alert using the `Alerts` dropdown at the top of the page. diff --git a/docs/uptime/images/alert-flyout.png b/docs/uptime/images/alert-flyout.png deleted file mode 100644 index 7fc1e3d9aefe2..0000000000000 Binary files a/docs/uptime/images/alert-flyout.png and /dev/null differ diff --git a/docs/uptime/images/certificates-page.png b/docs/uptime/images/certificates-page.png new file mode 100644 index 0000000000000..598aae982cd6a Binary files /dev/null and b/docs/uptime/images/certificates-page.png differ diff --git a/docs/uptime/images/check-history.png b/docs/uptime/images/check-history.png index 91565bf59aa7f..aac5efd9b91d3 100644 Binary files a/docs/uptime/images/check-history.png and b/docs/uptime/images/check-history.png differ diff --git a/docs/uptime/images/crosshair-example.png b/docs/uptime/images/crosshair-example.png index a4559eac1c3e7..f9e89c4f622e0 100644 Binary files a/docs/uptime/images/crosshair-example.png and b/docs/uptime/images/crosshair-example.png differ diff --git a/docs/uptime/images/filter-bar.png b/docs/uptime/images/filter-bar.png index dee735d0f4907..b7c424d3d0d91 100644 Binary files a/docs/uptime/images/filter-bar.png and b/docs/uptime/images/filter-bar.png differ diff --git a/docs/uptime/images/monitor-list.png b/docs/uptime/images/monitor-list.png index 0c8ad473428bd..c9a8eccf01f6e 100644 Binary files a/docs/uptime/images/monitor-list.png and b/docs/uptime/images/monitor-list.png differ diff --git a/docs/uptime/images/monitor-status-alert-flyout.png b/docs/uptime/images/monitor-status-alert-flyout.png new file mode 100644 index 0000000000000..407e69fc5e86e Binary files /dev/null and b/docs/uptime/images/monitor-status-alert-flyout.png differ diff --git a/docs/uptime/images/observability_integrations.png b/docs/uptime/images/observability_integrations.png index 6589c0c5565dd..3b23aa2dbd2a5 100644 Binary files a/docs/uptime/images/observability_integrations.png and b/docs/uptime/images/observability_integrations.png differ diff --git a/docs/uptime/images/settings.png b/docs/uptime/images/settings.png index dd36f0a6d702b..d19b7f842ea68 100644 Binary files a/docs/uptime/images/settings.png and b/docs/uptime/images/settings.png differ diff --git 
a/docs/uptime/images/snapshot-view.png b/docs/uptime/images/snapshot-view.png index 1fce2e9592c14..b6f07fb0721aa 100644 Binary files a/docs/uptime/images/snapshot-view.png and b/docs/uptime/images/snapshot-view.png differ diff --git a/docs/uptime/images/status-bar.png b/docs/uptime/images/status-bar.png index 8d242789cdccd..fd72e2b78c2a0 100644 Binary files a/docs/uptime/images/status-bar.png and b/docs/uptime/images/status-bar.png differ diff --git a/docs/uptime/images/tls-alert-flyout.png b/docs/uptime/images/tls-alert-flyout.png new file mode 100644 index 0000000000000..07c725c858a00 Binary files /dev/null and b/docs/uptime/images/tls-alert-flyout.png differ diff --git a/docs/uptime/index.asciidoc b/docs/uptime/index.asciidoc index a355f8ecf4843..c44ef366eaaa4 100644 --- a/docs/uptime/index.asciidoc +++ b/docs/uptime/index.asciidoc @@ -13,9 +13,13 @@ To get started with Elastic Uptime, refer to {uptime-guide}/install-uptime.html[ * <> * <> * <> +* <> +* <> -- include::overview.asciidoc[] include::monitor.asciidoc[] include::settings.asciidoc[] +include::certificates.asciidoc[] +include::alerting.asciidoc[] diff --git a/docs/uptime/overview.asciidoc b/docs/uptime/overview.asciidoc index 71c09c968e512..b449beddd240c 100644 --- a/docs/uptime/overview.asciidoc +++ b/docs/uptime/overview.asciidoc @@ -49,17 +49,6 @@ way to navigate to a more in-depth visualization for interesting hosts or endpoi This table includes information like the most recent status, when the monitor was last checked, its ID and URL, its IP address, and a dedicated sparkline showing its check status over time. -[float] -=== Creating and managing alerts - -[role="screenshot"] -image::uptime/images/alert-flyout.png[Create alert flyout] - -To receive alerts when a monitor goes down, use the alerting menu at the top of the -overview page. Use a query in the alert flyout to determine which monitors to check -with your alert. If you already have a query in the overview page search bar it will -be carried over into this box. - [float] === Observability integrations diff --git a/docs/uptime/settings.asciidoc b/docs/uptime/settings.asciidoc index 55da6e802bec6..131772609cb59 100644 --- a/docs/uptime/settings.asciidoc +++ b/docs/uptime/settings.asciidoc @@ -4,7 +4,9 @@ == Settings [role="screenshot"] -image::uptime/images/settings.png[Filter bar] +image::uptime/images/settings.png[Settings page] + +=== Indices The Uptime settings page lets you change which Heartbeat indices are displayed by the uptime app. Users must have the 'all' permission to modify items on this page. @@ -25,3 +27,22 @@ to manually query Elasticsearch for data outside this pattern! See the <> and {heartbeat-ref}/securing-heartbeat.html[Heartbeat security] docs for more information. + +=== Certificate thresholds + +You can modify settings in this section to control how Uptime will visualize your TLS values in the Certificates page. +These settings also determine which certificates will be selected by any TLS alert you define. + +There are two fields, `age` and `expiration`. Use the `age` threshold to specify when Uptime should warn +you about certificates that have been valid for too long. Use the `expiration` threshold to make Uptime warn you +about certificates whose expiration dates are approaching. + +For example, a common security requirement is to make sure that none of your organization's TLS certificates have been +valid for longer than one year. 
Setting the `Age limit` field to 365 days helps you keep track of which +certificates you may want to refresh. + +Likewise, to see ahead of time which of your TLS certificates are close to expiring, specify +an `Expiration threshold` on this page. When the number of days remaining before a certificate expires falls +below this threshold, Uptime will consider it to be in a warning state. If you have defined a TLS alert, you will +receive a notification from Uptime about the certificate. + diff --git a/docs/user/management.asciidoc b/docs/user/management.asciidoc index a4ba320e826b1..bcaede01b7a86 100644 --- a/docs/user/management.asciidoc +++ b/docs/user/management.asciidoc @@ -32,6 +32,12 @@ View index settings, mappings, and statistics and perform operations, such as re flushing, and clearing the cache. Practicing good index management ensures that your data is stored cost effectively. +a| <> + +Create and manage {es} +pipelines that enable you to perform common transformations and +enrichments on your data. + | <> View the status of your license, start a trial, or install a new license. For @@ -85,7 +91,7 @@ set the timespan for notification messages, and much more. | <> -Centrally manage your alerts across {kib}. Create and manage reusable +Centrally manage your alerts across {kib}. Create and manage reusable connectors for triggering actions. | <> @@ -140,6 +146,8 @@ include::{kib-repo-dir}/management/index-lifecycle-policies/example-index-lifecy include::{kib-repo-dir}/management/managing-indices.asciidoc[] +include::{kib-repo-dir}/management/ingest-pipelines/ingest-pipelines.asciidoc[] + include::{kib-repo-dir}/management/managing-fields.asciidoc[] include::{kib-repo-dir}/management/managing-licenses.asciidoc[] diff --git a/renovate.json5 b/renovate.json5 index f5bb39a16fe46..9a2ac20f91f04 100644 --- a/renovate.json5 +++ b/renovate.json5 @@ -25,7 +25,7 @@ 'Team:Operations', 'renovate', 'v8.0.0', - 'v7.7.0', + 'v7.9.0', ], major: { labels: [ @@ -33,7 +33,7 @@ 'Team:Operations', 'renovate', 'v8.0.0', - 'v7.7.0', + 'v7.9.0', 'renovate:major', ], }, @@ -246,7 +246,7 @@ 'Team:Operations', 'renovate', 'v8.0.0', - 'v7.7.0', + 'v7.9.0', ':ml', ], }, diff --git a/src/dev/renovate/config.ts b/src/dev/renovate/config.ts index 09283e6a5bf2d..d868f0a89b98c 100644 --- a/src/dev/renovate/config.ts +++ b/src/dev/renovate/config.ts @@ -21,7 +21,7 @@ import { RENOVATE_PACKAGE_GROUPS } from './package_groups'; import { PACKAGE_GLOBS } from './package_globs'; import { wordRegExp, maybeFlatMap, maybeMap, getTypePackageName } from './utils'; -const DEFAULT_LABELS = ['release_note:skip', 'Team:Operations', 'renovate', 'v8.0.0', 'v7.7.0']; +const DEFAULT_LABELS = ['release_note:skip', 'Team:Operations', 'renovate', 'v8.0.0', 'v7.9.0']; export const RENOVATE_CONFIG = { extends: ['config:base'], diff --git a/test/accessibility/apps/discover.ts b/test/accessibility/apps/discover.ts index 4563ffd19dd87..38552f5ecdafe 100644 --- a/test/accessibility/apps/discover.ts +++ b/test/accessibility/apps/discover.ts @@ -34,7 +34,8 @@ export default function ({ getService, getPageObjects }: FtrProviderContext) { ['geo.src', 'IN'], ]; - describe('Discover', () => { + // FLAKY: https://github.com/elastic/kibana/issues/62497 + describe.skip('Discover', () => { before(async () => { await esArchiver.load('discover'); await esArchiver.loadIfNeeded('logstash_functional'); diff --git a/x-pack/plugins/apm/e2e/README.md b/x-pack/plugins/apm/e2e/README.md index b630747ac2d3e..cf29b14d19541 100644 --- 
a/x-pack/plugins/apm/e2e/README.md +++ b/x-pack/plugins/apm/e2e/README.md @@ -6,21 +6,4 @@ x-pack/plugins/apm/e2e/run-e2e.sh ``` -_Starts Kibana, APM Server, Elasticsearch (with sample data) and runs the tests_ - -## Reproducing CI builds - -> This process is very slow compared to the local development described above. Consider that the CI must install and configure the build tools and create a Docker image for the project to run tests in a consistent manner. - -The Jenkins CI uses a shell script to prepare Kibana: - -```shell -# Prepare and run Kibana locally -$ x-pack/plugins/apm/e2e/ci/prepare-kibana.sh -# Build Docker image for Kibana -$ docker build --tag cypress --build-arg NODE_VERSION=$(cat .node-version) x-pack/plugins/apm/e2e/ci -# Run Docker image -$ docker run --rm -t --user "$(id -u):$(id -g)" \ - -v `pwd`:/app --network="host" \ - --name cypress cypress -``` +_Starts APM Server, Elasticsearch (with sample data) and runs the tests_ diff --git a/x-pack/plugins/apm/e2e/ci/Dockerfile b/x-pack/plugins/apm/e2e/ci/Dockerfile deleted file mode 100644 index 2bcc5a5fd843a..0000000000000 --- a/x-pack/plugins/apm/e2e/ci/Dockerfile +++ /dev/null @@ -1,24 +0,0 @@ -ARG NODE_VERSION -FROM node:$NODE_VERSION - -RUN apt-get -qq update \ - && apt-get -y -qq install xvfb \ - libgtk-3-0 \ - libxtst6 \ - libnotify-dev \ - libgconf-2-4 \ - libnss3 \ - libxss1 \ - libasound2 \ - --no-install-recommends \ - && rm -rf /var/lib/apt/lists/* - -## Add host.docker.internal to localhost -RUN apt-get -qq update \ - && apt-get -y -qq install dnsutils \ - --no-install-recommends \ - && rm -rf /var/lib/apt/lists/* - -COPY entrypoint.sh /entrypoint.sh - -ENTRYPOINT ["/entrypoint.sh"] diff --git a/x-pack/plugins/apm/e2e/ci/entrypoint.sh b/x-pack/plugins/apm/e2e/ci/entrypoint.sh deleted file mode 100755 index 3349aa74dadb9..0000000000000 --- a/x-pack/plugins/apm/e2e/ci/entrypoint.sh +++ /dev/null @@ -1,36 +0,0 @@ -#!/usr/bin/env bash -set -xe - -## host.docker.internal is not available in native docker installations -kibana=$(dig +short host.docker.internal) -if [ -z "${kibana}" ] ; then - kibana=127.0.0.1 -fi - -export CYPRESS_BASE_URL=http://${kibana}:5701 - -## To avoid issues with the home and caching artifacts -export HOME=/tmp -npm config set cache ${HOME} - -## To avoid issues with volumes. -#rsync -rv --exclude=.git --exclude=docs \ -# --exclude=.cache --exclude=node_modules \ -# --exclude=test/ \ -# --exclude=src/ \ -# --exclude=packages/ \ -# --exclude=built_assets --exclude=target \ -# --exclude=data /app ${HOME}/ -#cd ${HOME}/app/x-pack/plugins/apm/e2e/cypress - -cd /app/x-pack/plugins/apm/e2e -## Install dependencies for cypress -CI=true npm install -yarn install - -# Wait for the kibana to be up and running -npm install wait-on -./node_modules/.bin/wait-on ${CYPRESS_BASE_URL}/status && echo 'Kibana is up and running' - -# Run cypress -npm run cypress:run diff --git a/x-pack/plugins/apm/e2e/ci/prepare-kibana.sh b/x-pack/plugins/apm/e2e/ci/prepare-kibana.sh index 637f8fa9b4c74..f383dd6d16f7f 100755 --- a/x-pack/plugins/apm/e2e/ci/prepare-kibana.sh +++ b/x-pack/plugins/apm/e2e/ci/prepare-kibana.sh @@ -1,21 +1,13 @@ #!/usr/bin/env bash -set -e +set -ex -E2E_DIR="x-pack/plugins/apm/e2e" - -echo "1/3 Install dependencies ..." +E2E_DIR=x-pack/plugins/apm/e2e +echo "1/2 Install dependencies ..." # shellcheck disable=SC1091 source src/dev/ci_setup/setup_env.sh true -yarn kbn bootstrap - -echo "2/3 Ingest test data ..." 
-pushd ${E2E_DIR} -yarn install -curl --silent https://storage.googleapis.com/apm-ui-e2e-static-data/events.json --output ingest-data/events.json -node ingest-data/replay.js --server-url http://localhost:8201 --secret-token abcd --events ./events.json > ingest-data.log +yarn kbn clean && yarn kbn bootstrap -echo "3/3 Start Kibana ..." -popd +echo "2/2 Start Kibana ..." ## Might help to avoid FATAL ERROR: Ineffective mark-compacts near heap limit Allocation failed - JavaScript heap out of memory export NODE_OPTIONS="--max-old-space-size=4096" -nohup node scripts/kibana --config "${E2E_DIR}/ci/kibana.e2e.yml" --no-base-path --optimize.watch=false> kibana.log 2>&1 & +nohup node ./scripts/kibana --no-base-path --no-watch --dev --no-dev-config --config ${E2E_DIR}/ci/kibana.e2e.yml > ${E2E_DIR}/kibana.log 2>&1 & diff --git a/x-pack/plugins/apm/e2e/ci/run-e2e.sh b/x-pack/plugins/apm/e2e/ci/run-e2e.sh new file mode 100755 index 0000000000000..c40ab5d646477 --- /dev/null +++ b/x-pack/plugins/apm/e2e/ci/run-e2e.sh @@ -0,0 +1,11 @@ +#!/usr/bin/env bash +## +## This is a wrapper to configure the environment with the right tools in the CI +## and run the e2e steps. +## + +E2E_DIR="${0%/*}/.." +# shellcheck disable=SC1091 +source src/dev/ci_setup/setup_env.sh true +set -ex +"${E2E_DIR}"/run-e2e.sh diff --git a/x-pack/plugins/apm/e2e/cypress.json b/x-pack/plugins/apm/e2e/cypress.json index 0894cfd13a197..33304c2d8625f 100644 --- a/x-pack/plugins/apm/e2e/cypress.json +++ b/x-pack/plugins/apm/e2e/cypress.json @@ -1,7 +1,6 @@ { "nodeVersion": "system", "baseUrl": "http://localhost:5701", - "video": false, "trashAssetsBeforeRuns": false, "fileServerFolder": "../", "fixturesFolder": "./cypress/fixtures", @@ -9,6 +8,8 @@ "pluginsFile": "./cypress/plugins/index.js", "screenshotsFolder": "./cypress/screenshots", "supportFile": "./cypress/support/index.ts", + "video": true, + "videoCompression": false, "videosFolder": "./cypress/videos", "useRelativeSnapshots": true, "reporter": "junit", diff --git a/x-pack/plugins/apm/e2e/cypress/support/step_definitions/apm.ts b/x-pack/plugins/apm/e2e/cypress/support/step_definitions/apm.ts index 1e3228fdb0319..361d055db9ac1 100644 --- a/x-pack/plugins/apm/e2e/cypress/support/step_definitions/apm.ts +++ b/x-pack/plugins/apm/e2e/cypress/support/step_definitions/apm.ts @@ -7,6 +7,9 @@ import { Given, When, Then } from 'cypress-cucumber-preprocessor/steps'; import { loginAndWaitForPage } from '../../integration/helpers'; +/** The default time in ms to wait for a Cypress command to complete */ +export const DEFAULT_TIMEOUT = 60 * 1000; + Given(`a user browses the APM UI application`, () => { // open service overview page loginAndWaitForPage(`/app/apm#/services`); @@ -14,7 +17,9 @@ Given(`a user browses the APM UI application`, () => { When(`the user inspects the opbeans-node service`, () => { // click opbeans-node service - cy.get(':contains(opbeans-node)').last().click({ force: true }); + cy.get(':contains(opbeans-node)', { timeout: DEFAULT_TIMEOUT }) + .last() + .click({ force: true }); }); Then(`should redirect to correct path with correct params`, () => { diff --git a/x-pack/plugins/apm/e2e/cypress/videos/apm.feature.mp4 b/x-pack/plugins/apm/e2e/cypress/videos/apm.feature.mp4 new file mode 100644 index 0000000000000..93c310aaa0d4e Binary files /dev/null and b/x-pack/plugins/apm/e2e/cypress/videos/apm.feature.mp4 differ diff --git a/x-pack/plugins/apm/e2e/run-e2e.sh b/x-pack/plugins/apm/e2e/run-e2e.sh index ebfd29377e013..157c42cc7e4ee 100755 --- 
a/x-pack/plugins/apm/e2e/run-e2e.sh +++ b/x-pack/plugins/apm/e2e/run-e2e.sh @@ -1,4 +1,4 @@ -#!/bin/sh +#!/usr/bin/env bash # variables KIBANA_PORT=5701 @@ -65,6 +65,8 @@ ${APM_IT_DIR}/scripts/compose.py start master \ --elasticsearch-port $ELASTICSEARCH_PORT \ --apm-server-port=$APM_SERVER_PORT \ --elasticsearch-heap 4g \ + --apm-server-opt queue.mem.events=8192 \ + --apm-server-opt output.elasticsearch.bulk_max_size=4096 \ &> ${TMP_DIR}/apm-it.log # Stop if apm-integration-testing failed to start correctly @@ -98,7 +100,7 @@ curl --silent --user admin:changeme -XDELETE "localhost:${ELASTICSEARCH_PORT}/.a curl --silent --user admin:changeme -XDELETE "localhost:${ELASTICSEARCH_PORT}/apm*" > /dev/null # Ingest data into APM Server -node ingest-data/replay.js --server-url http://localhost:$APM_SERVER_PORT --events ${TMP_DIR}/events.json 2> ${TMP_DIR}/ingest-data.log +node ingest-data/replay.js --server-url http://localhost:$APM_SERVER_PORT --events ${TMP_DIR}/events.json 2>> ${TMP_DIR}/ingest-data.log # Stop if not all events were ingested correctly if [ $? -ne 0 ]; then @@ -113,6 +115,15 @@ echo "\n${bold}Waiting for Kibana to start...${normal}" echo "Note: you need to start Kibana manually. Find the instructions at the top." yarn wait-on -i 500 -w 500 http-get://admin:changeme@localhost:$KIBANA_PORT/api/status > /dev/null +## Workaround to wait until the http server is running +## See: https://github.com/elastic/kibana/issues/66326 +if [ -e kibana.log ] ; then + grep -m 1 "http server running" <(tail -f -n +1 kibana.log) + echo "\n✅ Kibana server running...\n" + grep -m 1 "bundles compiled successfully" <(tail -f -n +1 kibana.log) + echo "\n✅ Kibana bundles have been compiled...\n" +fi + echo "\n✅ Setup completed successfully. Running tests...\n" # @@ -129,4 +140,3 @@ ${bold}If you want to run the test interactively, run:${normal} yarn cypress open --config pageLoadTimeout=100000,watchForFileChanges=true " - diff --git a/x-pack/plugins/ingest_pipelines/__jest__/client_integration/ingest_pipelines_clone.test.tsx b/x-pack/plugins/ingest_pipelines/__jest__/client_integration/ingest_pipelines_clone.test.tsx index 2901367892213..81ee1435be5bd 100644 --- a/x-pack/plugins/ingest_pipelines/__jest__/client_integration/ingest_pipelines_clone.test.tsx +++ b/x-pack/plugins/ingest_pipelines/__jest__/client_integration/ingest_pipelines_clone.test.tsx @@ -24,7 +24,8 @@ jest.mock('@elastic/eui', () => ({ ), })); -describe('', () => { +// FLAKY: https://github.com/elastic/kibana/issues/66856 +describe.skip('', () => { let testBed: PipelinesCloneTestBed; const { server, httpRequestsMockHelpers } = setupEnvironment(); diff --git a/x-pack/plugins/siem/common/endpoint/generate_data.test.ts b/x-pack/plugins/siem/common/endpoint/generate_data.test.ts index 3dcf20c41f269..3fcb00d879583 100644 --- a/x-pack/plugins/siem/common/endpoint/generate_data.test.ts +++ b/x-pack/plugins/siem/common/endpoint/generate_data.test.ts @@ -3,7 +3,7 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. 
*/ -import { EndpointDocGenerator, Event } from './generate_data'; +import { EndpointDocGenerator, Event, Tree, TreeNode } from './generate_data'; interface Node { events: Event[]; @@ -93,11 +93,105 @@ describe('data generator', () => { expect(processEvent.process.name).not.toBeNull(); }); + describe('creates a resolver tree structure', () => { + let tree: Tree; + const ancestors = 3; + const childrenPerNode = 3; + const generations = 3; + beforeEach(() => { + tree = generator.generateTree({ + alwaysGenMaxChildrenPerNode: true, + ancestors, + children: childrenPerNode, + generations, + percentTerminated: 100, + percentWithRelated: 100, + relatedEvents: 4, + }); + }); + + const eventInNode = (event: Event, node: TreeNode) => { + const inLifecycle = node.lifecycle.includes(event); + const inRelated = node.relatedEvents.includes(event); + + return (inRelated || inLifecycle) && event.process.entity_id === node.id; + }; + + it('has the right number of ancestors', () => { + expect(tree.ancestry.size).toEqual(ancestors); + }); + + it('has the right number of total children', () => { + // the total number of children (not including the origin) = ((childrenPerNode^(generations + 1) - 1) / (childrenPerNode - 1)) - 1 + // https://stackoverflow.com/questions/7842397/what-is-the-total-number-of-nodes-in-a-full-k-ary-tree-in-terms-of-the-number-o + const leaves = Math.pow(childrenPerNode, generations); + // last -1 is for the origin since it's not in the children map + const nodes = (childrenPerNode * leaves - 1) / (childrenPerNode - 1) - 1; + expect(tree.children.size).toEqual(nodes); + }); + + it('has 2 lifecycle events for ancestors, children, and the origin', () => { + for (const node of tree.ancestry.values()) { + expect(node.lifecycle.length).toEqual(2); + } + + for (const node of tree.children.values()) { + expect(node.lifecycle.length).toEqual(2); + } + + expect(tree.origin.lifecycle.length).toEqual(2); + }); + + it('has all events in one of the tree fields', () => { + expect(tree.allEvents.length).toBeGreaterThan(0); + + tree.allEvents.forEach((event) => { + if (event.event.kind === 'alert') { + expect(event).toEqual(tree.alertEvent); + } else { + const ancestor = tree.ancestry.get(event.process.entity_id); + if (ancestor) { + expect(eventInNode(event, ancestor)).toBeTruthy(); + return; + } + + const children = tree.children.get(event.process.entity_id); + if (children) { + expect(eventInNode(event, children)).toBeTruthy(); + return; + } + + expect(eventInNode(event, tree.origin)).toBeTruthy(); + } + }); + }); + + const nodeEventCount = (node: TreeNode) => { + return node.lifecycle.length + node.relatedEvents.length; + }; + + it('has the correct number of total events', () => { + // starts at 1 because the alert is in the allEvents array + let total = 1; + for (const node of tree.ancestry.values()) { + total += nodeEventCount(node); + } + + for (const node of tree.children.values()) { + total += nodeEventCount(node); + } + + total += nodeEventCount(tree.origin); + + expect(tree.allEvents.length).toEqual(total); + }); + }); + describe('creates alert ancestor tree', () => { let events: Event[]; beforeEach(() => { - events = generator.createAlertEventAncestry(3); + events = generator.createAlertEventAncestry(3, 0, 0, 0); }); it('with n-1 process events', () => { diff --git a/x-pack/plugins/siem/common/endpoint/generate_data.ts b/x-pack/plugins/siem/common/endpoint/generate_data.ts index f520e2bf91041..a683db86dc6a0 100644 --- a/x-pack/plugins/siem/common/endpoint/generate_data.ts +++ 
b/x-pack/plugins/siem/common/endpoint/generate_data.ts @@ -3,7 +3,6 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ - import uuid from 'uuid'; import seedrandom from 'seedrandom'; import { @@ -116,6 +115,61 @@ interface NodeState { maxChildren: number; } +/** + * The Tree and TreeNode interfaces define structures to make testing of resolver functionality easier. The `generateTree` + * method builds a `Tree` structure that organizes the different parts of the resolver tree. Maps are used to allow + * tests to quickly verify if the node they retrieved from ES was actually created by the generator or if there is an + * issue with the implementation. The `Tree` structure serves as a source of truth for queries to ES. The entire Tree + * is stored in memory so it can be quickly accessed by the tests. The resolver api_integration tests currently leverage + * these structures for verifying that its implementation is returning the correct documents from ES and structuring + * the response correctly. + */ + +/** + * Defines the fields for each node in the tree. + */ +export interface TreeNode { + /** + * The entity_id for the node + */ + id: string; + lifecycle: Event[]; + relatedEvents: Event[]; +} + +/** + * A resolver tree that makes accessing specific nodes easier for tests. + */ +export interface Tree { + /** + * Map of entity_id to node + */ + children: Map; + /** + * Map of entity_id to node + */ + ancestry: Map; + origin: TreeNode; + alertEvent: Event; + /** + * All events from children, ancestry, origin, and the alert in a single array + */ + allEvents: Event[]; +} + +export interface TreeOptions { + /** + * The value in ancestors does not include the origin/root node + */ + ancestors?: number; + generations?: number; + children?: number; + relatedEvents?: number; + percentWithRelated?: number; + percentTerminated?: number; + alwaysGenMaxChildrenPerNode?: boolean; +} + export class EndpointDocGenerator { commonInfo: HostInfo; random: seedrandom.prng; @@ -319,6 +373,78 @@ export class EndpointDocGenerator { }; } + /** + * This generates a full resolver tree and keeps the entire tree in memory. This is useful for tests that want + * to compare results from elasticsearch with the actual events created by this generator. Because all the events + * are stored in memory, do not use this function to generate large trees. + * + * @param options - options for the layout of the tree, like how many children, generations, and ancestry + * @returns a Tree structure that makes accessing specific events easier + */ + public generateTree(options: TreeOptions = {}): Tree { + const addEventToMap = (nodeMap: Map, event: Event) => { + const nodeId = event.process.entity_id; + // if a node already exists for the entity_id we'll use that one, otherwise let's create a new empty node + // and add the event to the right array. 
+ let node = nodeMap.get(nodeId); + if (!node) { + node = { id: nodeId, lifecycle: [], relatedEvents: [] }; + } + + // place the event in the right array depending on its category + if (event.event.category === 'process') { + node.lifecycle.push(event); + } else { + node.relatedEvents.push(event); + } + return nodeMap.set(nodeId, node); + }; + + const ancestry = this.createAlertEventAncestry( + options.ancestors, + options.relatedEvents, + options.percentWithRelated, + options.percentTerminated + ); + + // create a mapping of entity_id -> lifecycle and related events + // slice gets everything but the last item which is an alert + const ancestryNodes: Map = ancestry + .slice(0, -1) + .reduce(addEventToMap, new Map()); + + const alert = ancestry[ancestry.length - 1]; + const origin = ancestryNodes.get(alert.process.entity_id); + if (!origin) { + throw Error(`could not find origin while building tree: ${alert.process.entity_id}`); + } + + // remove the origin node from the ancestry array + ancestryNodes.delete(alert.process.entity_id); + + const children = Array.from( + this.descendantsTreeGenerator( + alert, + options.generations, + options.children, + options.relatedEvents, + options.percentWithRelated, + options.percentTerminated, + options.alwaysGenMaxChildrenPerNode + ) + ); + + const childrenNodes: Map = children.reduce(addEventToMap, new Map()); + + return { + children: childrenNodes, + ancestry: ancestryNodes, + alertEvent: alert, + allEvents: [...ancestry, ...children], + origin, + }; + } + /** * Generator function that creates the full set of events needed to render resolver. * The number of nodes grows exponentially with the number of generations and children per node. @@ -328,7 +454,8 @@ export class EndpointDocGenerator { * @param maxChildrenPerNode - maximum number of children for any given node in the tree * @param relatedEventsPerNode - number of related events (file, registry, etc) to create for each process event in the tree * @param percentNodesWithRelated - percent of nodes which should have related events - * @param percentChildrenTerminated - percent of nodes which will have process termination events + * @param percentTerminated - percent of nodes which will have process termination events + * @param alwaysGenMaxChildrenPerNode - flag to always return the max children per node instead of it being a random number of children */ public *fullResolverTreeGenerator( alertAncestors?: number, @@ -336,12 +463,14 @@ export class EndpointDocGenerator { maxChildrenPerNode?: number, relatedEventsPerNode?: number, percentNodesWithRelated?: number, - percentChildrenTerminated?: number + percentTerminated?: number, + alwaysGenMaxChildrenPerNode?: boolean ) { const ancestry = this.createAlertEventAncestry( alertAncestors, relatedEventsPerNode, - percentNodesWithRelated + percentNodesWithRelated, + percentTerminated ); for (let i = 0; i < ancestry.length; i++) { yield ancestry[i]; @@ -353,24 +482,31 @@ export class EndpointDocGenerator { maxChildrenPerNode, relatedEventsPerNode, percentNodesWithRelated, - percentChildrenTerminated + percentTerminated, + alwaysGenMaxChildrenPerNode ); } /** * Creates an alert event and associated process ancestry. The alert event will always be the last event in the return array. 
* @param alertAncestors - number of ancestor generations to create + * @param relatedEventsPerNode - number of related events to add to each process node being created + * @param pctWithRelated - percent of ancestors that will have related events + * @param pctWithTerminated - percent of ancestors that will have termination events */ public createAlertEventAncestry( alertAncestors = 3, relatedEventsPerNode = 5, - pctWithRelated = 30 + pctWithRelated = 30, + pctWithTerminated = 100 ): Event[] { const events = []; const startDate = new Date().getTime(); const root = this.generateEvent({ timestamp: startDate + 1000 }); events.push(root); let ancestor = root; + let timestamp = root['@timestamp'] + 1000; + // generate related alerts for root const processDuration: number = 6 * 3600; if (this.randomN(100) < pctWithRelated) { @@ -382,12 +518,41 @@ export class EndpointDocGenerator { events.push(relatedEvent); } } + + // generate the termination event for the root + if (this.randomN(100) < pctWithTerminated) { + const termProcessDuration = this.randomN(1000000); // This lets termination events be up to 1 million seconds after the creation event (~11 days) + events.push( + this.generateEvent({ + timestamp: timestamp + termProcessDuration * 1000, + entityID: root.process.entity_id, + parentEntityID: root.process.parent?.entity_id, + eventCategory: 'process', + eventType: 'end', + }) + ); + } + for (let i = 0; i < alertAncestors; i++) { ancestor = this.generateEvent({ - timestamp: startDate + 1000 * (i + 1), + timestamp, parentEntityID: ancestor.process.entity_id, }); events.push(ancestor); + timestamp = timestamp + 1000; + + if (this.randomN(100) < pctWithTerminated) { + const termProcessDuration = this.randomN(1000000); // This lets termination events be up to 1 million seconds after the creation event (~11 days) + events.push( + this.generateEvent({ + timestamp: timestamp + termProcessDuration * 1000, + entityID: ancestor.process.entity_id, + parentEntityID: ancestor.process.parent?.entity_id, + eventCategory: 'process', + eventType: 'end', + }) + ); + } // generate related alerts for ancestor if (this.randomN(100) < pctWithRelated) { @@ -401,11 +566,7 @@ export class EndpointDocGenerator { } } events.push( - this.generateAlert( - startDate + 1000 * alertAncestors, - ancestor.process.entity_id, - ancestor.process.parent?.entity_id - ) + this.generateAlert(timestamp, ancestor.process.entity_id, ancestor.process.parent?.entity_id) ); return events; } @@ -418,6 +579,7 @@ export class EndpointDocGenerator { * @param relatedEventsPerNode - number of related events (file, registry, etc) to create for each process event in the tree * @param percentNodesWithRelated - percent of nodes which should have related events * @param percentChildrenTerminated - percent of nodes which will have process termination events + * @param alwaysGenMaxChildrenPerNode - flag to always return the max children per node instead of it being a random number of children */ public *descendantsTreeGenerator( root: Event, @@ -425,12 +587,18 @@ export class EndpointDocGenerator { maxChildrenPerNode = 2, relatedEventsPerNode = 3, percentNodesWithRelated = 100, - percentChildrenTerminated = 100 + percentChildrenTerminated = 100, + alwaysGenMaxChildrenPerNode = false ) { + let maxChildren = this.randomN(maxChildrenPerNode + 1); + if (alwaysGenMaxChildrenPerNode) { + maxChildren = maxChildrenPerNode; + } + const rootState: NodeState = { event: root, childrenCreated: 0, - maxChildren: this.randomN(maxChildrenPerNode + 1), + maxChildren, 
}; const lineage: NodeState[] = [rootState]; let timestamp = root['@timestamp']; @@ -452,10 +620,15 @@ export class EndpointDocGenerator { timestamp, parentEntityID: currentState.event.process.entity_id, }); + + maxChildren = this.randomN(maxChildrenPerNode + 1); + if (alwaysGenMaxChildrenPerNode) { + maxChildren = maxChildrenPerNode; + } lineage.push({ event: child, childrenCreated: 0, - maxChildren: this.randomN(maxChildrenPerNode + 1), + maxChildren, }); yield child; let processDuration: number = 6 * 3600; diff --git a/x-pack/plugins/siem/common/endpoint/models/event.ts b/x-pack/plugins/siem/common/endpoint/models/event.ts index 192daba4a717d..2c325d64f8515 100644 --- a/x-pack/plugins/siem/common/endpoint/models/event.ts +++ b/x-pack/plugins/siem/common/endpoint/models/event.ts @@ -9,6 +9,13 @@ export function isLegacyEvent(event: ResolverEvent): event is LegacyEndpointEven return (event as LegacyEndpointEvent).endgame !== undefined; } +export function isProcessStart(event: ResolverEvent): boolean { + if (isLegacyEvent(event)) { + return event.event?.type === 'process_start' || event.event?.action === 'fork_event'; + } + return event.event.type === 'start'; +} + export function eventTimestamp(event: ResolverEvent): string | undefined | number { if (isLegacyEvent(event)) { return event.endgame.timestamp_utc; diff --git a/x-pack/plugins/siem/common/endpoint/schema/resolver.ts b/x-pack/plugins/siem/common/endpoint/schema/resolver.ts index f21307e407fd0..8d60a532aa67c 100644 --- a/x-pack/plugins/siem/common/endpoint/schema/resolver.ts +++ b/x-pack/plugins/siem/common/endpoint/schema/resolver.ts @@ -51,8 +51,8 @@ export const validateAncestry = { export const validateChildren = { params: schema.object({ id: schema.string() }), query: schema.object({ - children: schema.number({ defaultValue: 10, min: 10, max: 100 }), - generations: schema.number({ defaultValue: 3, min: 0, max: 3 }), + children: schema.number({ defaultValue: 10, min: 1, max: 100 }), + generations: schema.number({ defaultValue: 3, min: 1, max: 3 }), afterChild: schema.maybe(schema.string()), legacyEndpointID: schema.maybe(schema.string()), }), diff --git a/x-pack/plugins/siem/common/endpoint/types.ts b/x-pack/plugins/siem/common/endpoint/types.ts index 43c7e20445d5d..3a86014c57148 100644 --- a/x-pack/plugins/siem/common/endpoint/types.ts +++ b/x-pack/plugins/siem/common/endpoint/types.ts @@ -33,31 +33,97 @@ type ImmutableMap = ReadonlyMap, Immutable>; type ImmutableSet = ReadonlySet>; type ImmutableObject = { readonly [K in keyof T]: Immutable }; +/** + * Statistical information for a node in a resolver tree. + */ export interface ResolverNodeStats { + /** + * The total number of related events (all events except process and alerts) that exist for a node. + */ totalEvents: number; + /** + * The total number of alerts that exist for a node. + */ totalAlerts: number; } -export interface ResolverNodePagination { - nextChild?: string | null; - nextEvent?: string | null; - nextAncestor?: string | null; - nextAlert?: string | null; +/** + * A child node can also have additional children so we need to provide a pagination cursor. + */ +export interface ChildNode extends LifecycleNode { + /** + * A child node's pagination cursor can be null for a couple reasons: + * 1. At the time of querying it could have no children in ES, in which case it will be marked as + * null because we know it does not have children during this query. + * 2. 
If the max level was reached we do not know if this node has children or not so we'll mark it as null + */ + nextChild: string | null; } /** - * A node that contains pointers to other nodes, arrrays of resolver events, and any metadata associated with resolver specific data + * The response structure for the children route. The structure is an array of nodes where each node + * has an array of lifecycle events. */ -export interface ResolverNode { - id: string; - children: ResolverNode[]; - events: ResolverEvent[]; +export interface ResolverChildren { + childNodes: ChildNode[]; + /** + * This is the children cursor for the origin of a tree. + */ + nextChild: string | null; +} + +/** + * A flattened tree representing the nodes in a resolver graph. + */ +export interface ResolverTree { + /** + * Origin of the tree. This is in the middle of the tree. Typically this would be the same + * process node that generated an alert. + */ + entityID: string; + children: ResolverChildren; + relatedEvents: Omit; + ancestry: ResolverAncestry; lifecycle: ResolverEvent[]; - ancestors?: ResolverNode[]; - pagination: ResolverNodePagination; + stats: ResolverNodeStats; +} + +/** + * The lifecycle events (start, end etc) for a node. + */ +export interface LifecycleNode { + entityID: string; + lifecycle: ResolverEvent[]; + /** + * stats are only set when the entire tree is being fetched + */ stats?: ResolverNodeStats; } +/** + * The response structure when searching for ancestors of a node. + */ +export interface ResolverAncestry { + /** + * An array of ancestors with the lifecycle events grouped together + */ + ancestors: LifecycleNode[]; + /** + * A cursor for retrieving additional ancestors for a particular node. `null` indicates that there were no additional + * ancestors when the request returned. More could have been ingested by ES after the fact though. + */ + nextAncestor: string | null; +} + +/** + * Response structure for the related events route. 
+ */ +export interface ResolverRelatedEvents { + entityID: string; + events: ResolverEvent[]; + nextEvent: string | null; +} + /** * Returned by the server via /api/endpoint/metadata */ @@ -302,6 +368,10 @@ export interface LegacyEndpointEvent { process?: object; rule?: object; user?: object; + event?: { + action?: string; + type?: string; + }; } export interface EndpointEvent { diff --git a/x-pack/plugins/siem/public/resolver/store/middleware.ts b/x-pack/plugins/siem/public/resolver/store/middleware.ts index 4ab0cd6397a2f..038163a164c6c 100644 --- a/x-pack/plugins/siem/public/resolver/store/middleware.ts +++ b/x-pack/plugins/siem/public/resolver/store/middleware.ts @@ -9,7 +9,13 @@ import { HttpHandler } from 'kibana/public'; import { KibanaReactContextValue } from '../../../../../../src/plugins/kibana_react/public'; import { StartServices } from '../../types'; import { ResolverState, ResolverAction, RelatedEventDataEntry } from '../types'; -import { ResolverEvent, ResolverNode } from '../../../common/endpoint/types'; +import { + ResolverEvent, + ResolverChildren, + ResolverAncestry, + LifecycleNode, + ResolverRelatedEvents, +} from '../../../common/endpoint/types'; import * as event from '../../../common/endpoint/models/event'; type MiddlewareFactory = ( @@ -18,20 +24,17 @@ type MiddlewareFactory = ( api: MiddlewareAPI, S> ) => (next: Dispatch) => (action: ResolverAction) => unknown; -function flattenEvents(children: ResolverNode[], events: ResolverEvent[] = []): ResolverEvent[] { - return children.reduce((flattenedEvents, currentNode) => { +function getLifecycleEvents(nodes: LifecycleNode[], events: ResolverEvent[] = []): ResolverEvent[] { + return nodes.reduce((flattenedEvents, currentNode) => { if (currentNode.lifecycle && currentNode.lifecycle.length > 0) { flattenedEvents.push(...currentNode.lifecycle); } - if (currentNode.children && currentNode.children.length > 0) { - return flattenEvents(currentNode.children, events); - } else { - return flattenedEvents; - } + + return flattenedEvents; }, events); } -type RelatedEventAPIResponse = 'error' | { events: ResolverEvent[] }; +type RelatedEventAPIResponse = 'error' | ResolverRelatedEvents; /** * As the design goal of this stopgap was to prevent saturating the server with /events * requests, this generator intentionally processes events in serial rather than in parallel. 
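A minimal sketch of the serial pattern described in the comment above (an illustration only, not the plugin's actual code; `fetchOne` is a hypothetical stand-in for a single /events request):

```typescript
// Each fetch is awaited before the next begins, so at most one request is in
// flight at a time; Promise.all(ids.map(fetchOne)) would instead fire them all
// concurrently and could saturate the server.
async function fetchSerially<T>(
  ids: string[],
  fetchOne: (id: string) => Promise<T>
): Promise<T[]> {
  const results: T[] = [];
  for (const id of ids) {
    results.push(await fetchOne(id));
  }
  return results;
}
```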
@@ -69,19 +72,19 @@ export const resolverMiddlewareFactory: MiddlewareFactory = (context) => { api.dispatch({ type: 'appRequestedResolverData' }); try { let lifecycle: ResolverEvent[]; - let children: ResolverNode[]; - let ancestors: ResolverNode[]; + let children: ResolverChildren; + let ancestry: ResolverAncestry; if (event.isLegacyEvent(action.payload.selectedEvent)) { const entityId = action.payload.selectedEvent?.endgame?.unique_pid; const legacyEndpointID = action.payload.selectedEvent?.agent?.id; - [{ lifecycle, children, ancestors }] = await Promise.all([ + [{ lifecycle, children, ancestry }] = await Promise.all([ context.services.http.get(`/api/endpoint/resolver/${entityId}`, { query: { legacyEndpointID, children: 5, ancestors: 5 }, }), ]); } else { const entityId = action.payload.selectedEvent.process.entity_id; - [{ lifecycle, children, ancestors }] = await Promise.all([ + [{ lifecycle, children, ancestry }] = await Promise.all([ context.services.http.get(`/api/endpoint/resolver/${entityId}`, { query: { children: 5, @@ -92,8 +95,8 @@ export const resolverMiddlewareFactory: MiddlewareFactory = (context) => { } const response: ResolverEvent[] = [ ...lifecycle, - ...flattenEvents(children), - ...flattenEvents(ancestors), + ...getLifecycleEvents(children.childNodes), + ...getLifecycleEvents(ancestry.ancestors), ]; api.dispatch({ type: 'serverReturnedResolverData', diff --git a/x-pack/plugins/siem/scripts/endpoint/README.md b/x-pack/plugins/siem/scripts/endpoint/README.md index 84e92d8c397e1..5a1f2982b22ec 100644 --- a/x-pack/plugins/siem/scripts/endpoint/README.md +++ b/x-pack/plugins/siem/scripts/endpoint/README.md @@ -1,3 +1,5 @@ +# Resolver Generator Script + This script makes it easy to create the endpoint metadata, alert, and event documents needed to test Resolver in Kibana. The default behavior is to create 1 endpoint with 1 alert and a moderate number of events (random, typically on the order of 20). A seed value can be provided as a string for the random number generator for repeatable behavior, useful for demos etc. 
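For example, a seeded run could look like the following sketch (flag spellings are taken from the options list below; the seed value and tree sizes are arbitrary):

```bash
# The same seed produces the same endpoint, alert, and event documents on every run
yarn test:generate --auth elastic:changeme -s my-demo-seed --anc 2 --gen 2 --ch 2
```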
@@ -5,42 +7,62 @@ Use the `-d` option if you want to delete and remake the indices, otherwise it w The sample data generator script depends on ts-node, install with npm: -```npm install -g ts-node``` +`npm install -g ts-node` Example command sequence to get ES and kibana running with sample data after installing ts-node: -```yarn es snapshot``` -> starts ES +`yarn es snapshot` -> starts ES -```npx yarn start --xpack.siem.endpoint.enabled=true --no-base-path``` -> starts kibana +`npx yarn start --xpack.siem.endpoint.enabled=true --no-base-path` -> starts kibana -```cd ~/path/to/kibana/x-pack/plugins/endpoint``` +`cd ~/path/to/kibana/x-pack/plugins/endpoint` -```yarn test:generate --auth elastic:changeme``` -> run the resolver_generator.ts script +`yarn test:generate --auth elastic:changeme` -> run the resolver_generator.ts script Resolver generator CLI options: -```--help Show help [boolean] - --seed, -s random seed to use for document generator [string] - --node, -n elasticsearch node url + +```bash +Options: + --help Show help [boolean] + --seed, -s random seed to use for document generator + [string] + --node, -n elasticsearch node url [string] [default: "http://localhost:9200"] - --eventIndex, --ei index to store events in + --alertIndex, --ai index to store alerts in + [string] [default: "events-endpoint-1"] + --eventIndex, --ei index to store events in [string] [default: "events-endpoint-1"] - --metadataIndex, --mi index to store endpoint metadata in - [string] [default: "endpoint-agent-1"] - --auth elasticsearch username and password, separated by - a colon [string] - --ancestors, --anc number of ancestors of origin to create + --metadataIndex, --mi index to store host metadata in + [string] [default: "metrics-endpoint-default-1"] + --policyIndex, --pi index to store host policy in + [string] [default: "metrics-endpoint.policy-default-1"] + --auth elasticsearch username and password, separated + by a colon [string] + --ancestors, --anc number of ancestors of origin to create [number] [default: 3] - --generations, --gen number of child generations to create + --generations, --gen number of child generations to create [number] [default: 3] - --children, --ch maximum number of children per node + --children, --ch maximum number of children per node [number] [default: 3] - --relatedEvents, --related number of related events to create for each - process event [number] [default: 5] - --percentWithRelated, --pr percent of process events to add related events to - [number] [default: 30] - --percentTerminated, --pt percent of process events to add termination event - for [number] [default: 30] - --numEndpoints, --ne number of different endpoints to generate alerts - for [number] [default: 1] - --alertsPerEndpoint, --ape number of resolver trees to make for each endpoint - [number] [default: 1]``` + --relatedEvents, --related number of related events to create for each + process event [number] [default: 5] + --percentWithRelated, --pr percent of process events to add related events + to [number] [default: 30] + --percentTerminated, --pt percent of process events to add termination + event for [number] [default: 30] + --maxChildrenPerNode, --maxCh always generate the max number of children per + node instead of it being random up to the max + children [boolean] [default: false] + --numHosts, --ne number of different hosts to generate alerts + for [number] [default: 1] + --numDocs, --nd number of metadata and policy response doc to + generate per host [number] [default: 5] + --alertsPerHost, 
--ape number of resolver trees to make for each host + [number] [default: 1] + --delete, -d delete indices and remake them + [boolean] [default: false] + --setupOnly, --so Run only the index and pipeline creation then + exit. This is intended to be used to set up the + Endpoint App for use with the real Elastic + Endpoint. [boolean] [default: false] +``` diff --git a/x-pack/plugins/siem/scripts/endpoint/resolver_generator.ts b/x-pack/plugins/siem/scripts/endpoint/resolver_generator.ts index 05663ad249be1..77bf200eeb540 100644 --- a/x-pack/plugins/siem/scripts/endpoint/resolver_generator.ts +++ b/x-pack/plugins/siem/scripts/endpoint/resolver_generator.ts @@ -119,6 +119,13 @@ async function main() { type: 'number', default: 30, }, + maxChildrenPerNode: { + alias: 'maxCh', + describe: + 'always generate the max number of children per node instead of it being random up to the max children', + type: 'boolean', + default: false, + }, numHosts: { alias: 'ne', describe: 'number of different hosts to generate alerts for', @@ -241,7 +248,8 @@ async function main() { argv.children, argv.relatedEvents, argv.percentWithRelated, - argv.percentTerminated + argv.percentTerminated, + argv.maxChildrenPerNode ); let result = resolverDocGenerator.next(); while (!result.done) { diff --git a/x-pack/plugins/siem/server/endpoint/routes/resolver/ancestry.ts b/x-pack/plugins/siem/server/endpoint/routes/resolver/ancestry.ts index 233f23bd314c1..a2af73da454ad 100644 --- a/x-pack/plugins/siem/server/endpoint/routes/resolver/ancestry.ts +++ b/x-pack/plugins/siem/server/endpoint/routes/resolver/ancestry.ts @@ -26,10 +26,10 @@ export function handleAncestry( const indexPattern = await indexRetriever.getEventIndexPattern(context); const fetcher = new Fetcher(client, id, indexPattern, endpointID); - const tree = await fetcher.ancestors(ancestors + 1); + const ancestorInfo = await fetcher.ancestors(ancestors); return res.ok({ - body: tree.render(), + body: ancestorInfo, }); } catch (err) { log.warn(err); diff --git a/x-pack/plugins/siem/server/endpoint/routes/resolver/children.ts b/x-pack/plugins/siem/server/endpoint/routes/resolver/children.ts index 13af514c4c55f..4eb25b03acb45 100644 --- a/x-pack/plugins/siem/server/endpoint/routes/resolver/children.ts +++ b/x-pack/plugins/siem/server/endpoint/routes/resolver/children.ts @@ -24,12 +24,10 @@ export function handleChildren( const indexPattern = await indexRetriever.getEventIndexPattern(context); const client = context.core.elasticsearch.dataClient; - const fetcher = new Fetcher(client, id, indexPattern, endpointID); - const tree = await fetcher.children(children, generations, afterChild); return res.ok({ - body: tree.render(), + body: await fetcher.children(children, generations, afterChild), }); } catch (err) { log.warn(err); diff --git a/x-pack/plugins/siem/server/endpoint/routes/resolver/events.ts b/x-pack/plugins/siem/server/endpoint/routes/resolver/events.ts index 97f718b66a437..ac720b7569746 100644 --- a/x-pack/plugins/siem/server/endpoint/routes/resolver/events.ts +++ b/x-pack/plugins/siem/server/endpoint/routes/resolver/events.ts @@ -25,10 +25,9 @@ export function handleEvents( const indexPattern = await indexRetriever.getEventIndexPattern(context); const fetcher = new Fetcher(client, id, indexPattern, endpointID); - const tree = await fetcher.events(events, afterEvent); return res.ok({ - body: tree.render(), + body: await fetcher.events(events, afterEvent), }); } catch (err) { log.warn(err); diff --git 
a/x-pack/plugins/siem/server/endpoint/routes/resolver/queries/base.ts b/x-pack/plugins/siem/server/endpoint/routes/resolver/queries/base.ts index 4f6003492fd3a..440b578bde413 100644 --- a/x-pack/plugins/siem/server/endpoint/routes/resolver/queries/base.ts +++ b/x-pack/plugins/siem/server/endpoint/routes/resolver/queries/base.ts @@ -7,50 +7,98 @@ import { SearchResponse } from 'elasticsearch'; import { IScopedClusterClient } from 'kibana/server'; import { ResolverEvent } from '../../../../../common/endpoint/types'; -import { - paginate, - paginatedResults, - PaginationParams, - PaginatedResults, -} from '../utils/pagination'; import { JsonObject } from '../../../../../../../../src/plugins/kibana_utils/public'; import { legacyEventIndexPattern } from './legacy_event_index_pattern'; +import { MSearchQuery } from './multi_searcher'; -export abstract class ResolverQuery { - constructor( - private readonly indexPattern: string, - private readonly endpointID?: string, - private readonly pagination?: PaginationParams - ) {} - - protected paginateBy(tiebreaker: string, aggregator: string) { - return (query: JsonObject) => { - if (!this.pagination) { - return query; - } - return paginate(this.pagination, tiebreaker, aggregator, query); - }; +/** + * ResolverQuery provides the base structure for queries to retrieve events when building a resolver graph. + * + * @param T the structured return type of a resolver query. This represents the type that is returned when translating + * Elasticsearch's SearchResponse<ResolverEvent> response. + */ +export abstract class ResolverQuery<T> implements MSearchQuery { + /** + * + * @param indexPattern the index pattern to use in the query for finding indices with documents in ES. + * @param endpointID this field is optional because it is only used when searching for legacy event data. The reason + * we need `endpointID` for legacy data is because we don't have a cross endpoint unique identifier for process + * events. Instead we use `unique_pid/ppid` and `endpointID` to uniquely identify a process event. + */ + constructor(private readonly indexPattern: string, private readonly endpointID?: string) {} + + private static createIdsArray(ids: string | string[]): string[] { + return Array.isArray(ids) ? ids : [ids]; } - build(...ids: string[]) { + private buildQuery(ids: string | string[]): { query: JsonObject; index: string } { + const idsArray = ResolverQuery.createIdsArray(ids); if (this.endpointID) { - return this.legacyQuery(this.endpointID, ids, legacyEventIndexPattern); + return { query: this.legacyQuery(this.endpointID, idsArray), index: legacyEventIndexPattern }; } - return this.query(ids, this.indexPattern); + return { query: this.query(idsArray), index: this.indexPattern }; + } + + private buildSearch(ids: string | string[]) { + const { query, index } = this.buildQuery(ids); + return { + body: query, + index, + }; + } + + protected static getResults(response: SearchResponse<ResolverEvent>): ResolverEvent[] { + return response.hits.hits.map((hit) => hit._source); } - async search(client: IScopedClusterClient, ...ids: string[]) { - return this.postSearch(await client.callAsCurrentUser('search', this.build(...ids))); + /** + * Builds a multi search representation for this query + * + * @param ids a single or multiple unique id (e.g. 
entity_id for new events or unique_pid for legacy events) to search for in the query + * @returns an array of header and body pairs that represents a multi search + * https://www.elastic.co/guide/en/elasticsearch/reference/current/search-multi-search.html + * https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/msearch_examples.html + */ + buildMSearch(ids: string | string[]): JsonObject[] { + const { query, index } = this.buildQuery(ids); + return [{ index }, query]; } - protected postSearch(response: SearchResponse<ResolverEvent>): PaginatedResults { - return paginatedResults(response); + /** + * Searches ES for the specified ids. + * + * @param client a client for searching ES + * @param ids a single or multiple unique node ids (e.g. entity_id or unique_pid) + */ + async search(client: IScopedClusterClient, ids: string | string[]): Promise<T> { + const res: SearchResponse<ResolverEvent> = await client.callAsCurrentUser( + 'search', + this.buildSearch(ids) + ); + return this.formatResponse(res); } - protected abstract legacyQuery( - endpointID: string, - uniquePIDs: string[], - index: string - ): JsonObject; - protected abstract query(entityIDs: string[], index: string): JsonObject; + /** + * Builds a query to search the legacy data format. + * + * @param endpointID a unique identifier for a sensor + * @param uniquePIDs array of unique process IDs to search for + * @returns a query to use in ES + */ + protected abstract legacyQuery(endpointID: string, uniquePIDs: string[]): JsonObject; + + /** + * Builds a query to search for events in ES. + * + * @param entityIDs array of unique identifiers for events treated as nodes + */ + protected abstract query(entityIDs: string[]): JsonObject; + + /** + * Translates the response from executing the derived class's query into a structured object + * + * @param response a SearchResponse from ES resulting from executing this query + * @returns the translated ES response into a structured object + */ + public abstract formatResponse(response: SearchResponse<ResolverEvent>): T; } diff --git a/x-pack/plugins/siem/server/endpoint/routes/resolver/queries/children.test.ts b/x-pack/plugins/siem/server/endpoint/routes/resolver/queries/children.test.ts index 2a097e87c38b2..a4d4cd546ef60 100644 --- a/x-pack/plugins/siem/server/endpoint/routes/resolver/queries/children.test.ts +++ b/x-pack/plugins/siem/server/endpoint/routes/resolver/queries/children.test.ts @@ -4,109 +4,29 @@ * you may not use this file except in compliance with the Elastic License. 
*/ import { ChildrenQuery } from './children'; +import { PaginationBuilder } from '../utils/pagination'; import { legacyEventIndexPattern } from './legacy_event_index_pattern'; -export const fakeEventIndexPattern = 'events-endpoint-*'; - -describe('children events query', () => { - it('generates the correct legacy queries', () => { - const timestamp = new Date().getTime(); - expect( - new ChildrenQuery(legacyEventIndexPattern, 'awesome-id', { - size: 1, - timestamp, - eventID: 'foo', - }).build('5') - ).toStrictEqual({ - body: { - query: { - bool: { - filter: [ - { - terms: { 'endgame.unique_ppid': ['5'] }, - }, - { - term: { 'agent.id': 'awesome-id' }, - }, - { - term: { 'event.category': 'process' }, - }, - { - term: { 'event.kind': 'event' }, - }, - { - bool: { - should: [ - { - term: { 'event.type': 'process_start' }, - }, - { - term: { 'event.action': 'fork_event' }, - }, - ], - }, - }, - ], - }, - }, - aggs: { - totals: { - terms: { - field: 'endgame.unique_ppid', - size: 1, - }, - }, - }, - search_after: [timestamp, 'foo'], - size: 1, - sort: [{ '@timestamp': 'asc' }, { 'endgame.serial_event_id': 'asc' }], - }, - index: legacyEventIndexPattern, +describe('Children query', () => { + it('constructs a legacy multi search query', () => { + const query = new ChildrenQuery(new PaginationBuilder(1), 'index-pattern', 'endpointID'); + // using any here because otherwise ts complains that it doesn't know what bool and filter are + // eslint-disable-next-line @typescript-eslint/no-explicit-any + const msearch: any = query.buildMSearch('1234'); + expect(msearch[0].index).toBe(legacyEventIndexPattern); + expect(msearch[1].query.bool.filter[0]).toStrictEqual({ + terms: { 'endgame.unique_ppid': ['1234'] }, }); }); - it('generates the correct non-legacy queries', () => { - const timestamp = new Date().getTime(); - - expect( - new ChildrenQuery(fakeEventIndexPattern, undefined, { - size: 1, - timestamp, - eventID: 'bar', - }).build('baz') - ).toStrictEqual({ - body: { - query: { - bool: { - filter: [ - { - terms: { 'process.parent.entity_id': ['baz'] }, - }, - { - term: { 'event.category': 'process' }, - }, - { - term: { 'event.kind': 'event' }, - }, - { - term: { 'event.type': 'start' }, - }, - ], - }, - }, - aggs: { - totals: { - terms: { - field: 'process.parent.entity_id', - size: 1, - }, - }, - }, - search_after: [timestamp, 'bar'], - size: 1, - sort: [{ '@timestamp': 'asc' }, { 'event.id': 'asc' }], - }, - index: fakeEventIndexPattern, + it('constructs a non-legacy multi search query', () => { + const query = new ChildrenQuery(new PaginationBuilder(1), 'index-pattern'); + // using any here because otherwise ts complains that it doesn't know what bool and filter are + // eslint-disable-next-line @typescript-eslint/no-explicit-any + const msearch: any = query.buildMSearch(['1234', '5678']); + expect(msearch[0].index).toBe('index-pattern'); + expect(msearch[1].query.bool.filter[0]).toStrictEqual({ + terms: { 'process.parent.entity_id': ['1234', '5678'] }, }); }); }); diff --git a/x-pack/plugins/siem/server/endpoint/routes/resolver/queries/children.ts b/x-pack/plugins/siem/server/endpoint/routes/resolver/queries/children.ts index 690c926d7e6d6..e4b2559a1780c 100644 --- a/x-pack/plugins/siem/server/endpoint/routes/resolver/queries/children.ts +++ b/x-pack/plugins/siem/server/endpoint/routes/resolver/queries/children.ts @@ -3,72 +3,92 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. 
*/ +import { SearchResponse } from 'elasticsearch'; +import { ResolverEvent } from '../../../../../common/endpoint/types'; import { ResolverQuery } from './base'; +import { PaginationBuilder, PaginatedResults } from '../utils/pagination'; +import { JsonObject } from '../../../../../../../../src/plugins/kibana_utils/public'; -export class ChildrenQuery extends ResolverQuery { - protected legacyQuery(endpointID: string, uniquePIDs: string[], index: string) { - const paginator = this.paginateBy('endgame.serial_event_id', 'endgame.unique_ppid'); +/** + * Builds a query for retrieving descendants of a node. + */ +export class ChildrenQuery extends ResolverQuery { + constructor( + private readonly pagination: PaginationBuilder, + indexPattern: string, + endpointID?: string + ) { + super(indexPattern, endpointID); + } + + protected legacyQuery(endpointID: string, uniquePIDs: string[]): JsonObject { return { - body: paginator({ - query: { - bool: { - filter: [ - { - terms: { 'endgame.unique_ppid': uniquePIDs }, - }, - { - term: { 'agent.id': endpointID }, + query: { + bool: { + filter: [ + { + terms: { 'endgame.unique_ppid': uniquePIDs }, + }, + { + term: { 'agent.id': endpointID }, + }, + { + term: { 'event.category': 'process' }, + }, + { + term: { 'event.kind': 'event' }, + }, + { + bool: { + should: [ + { + term: { 'event.type': 'process_start' }, + }, + { + term: { 'event.action': 'fork_event' }, + }, + ], }, - { - term: { 'event.category': 'process' }, - }, - { - term: { 'event.kind': 'event' }, - }, - { - bool: { - should: [ - { - term: { 'event.type': 'process_start' }, - }, - { - term: { 'event.action': 'fork_event' }, - }, - ], - }, - }, - ], - }, + }, + ], }, - }), - index, + }, + ...this.pagination.buildQueryFields( + uniquePIDs.length, + 'endgame.serial_event_id', + 'endgame.unique_ppid' + ), }; } - protected query(entityIDs: string[], index: string) { - const paginator = this.paginateBy('event.id', 'process.parent.entity_id'); + protected query(entityIDs: string[]): JsonObject { return { - body: paginator({ - query: { - bool: { - filter: [ - { - terms: { 'process.parent.entity_id': entityIDs }, - }, - { - term: { 'event.category': 'process' }, - }, - { - term: { 'event.kind': 'event' }, - }, - { - term: { 'event.type': 'start' }, - }, - ], - }, + query: { + bool: { + filter: [ + { + terms: { 'process.parent.entity_id': entityIDs }, + }, + { + term: { 'event.category': 'process' }, + }, + { + term: { 'event.kind': 'event' }, + }, + { + term: { 'event.type': 'start' }, + }, + ], }, - }), - index, + }, + ...this.pagination.buildQueryFields(entityIDs.length, 'event.id', 'process.parent.entity_id'), + }; + } + + formatResponse(response: SearchResponse): PaginatedResults { + return { + results: ResolverQuery.getResults(response), + totals: PaginationBuilder.getTotals(response.aggregations), }; } } diff --git a/x-pack/plugins/siem/server/endpoint/routes/resolver/queries/events.test.ts b/x-pack/plugins/siem/server/endpoint/routes/resolver/queries/events.test.ts index 78e5ee9226581..00d7570b2b65e 100644 --- a/x-pack/plugins/siem/server/endpoint/routes/resolver/queries/events.test.ts +++ b/x-pack/plugins/siem/server/endpoint/routes/resolver/queries/events.test.ts @@ -4,101 +4,29 @@ * you may not use this file except in compliance with the Elastic License. 
*/ import { EventsQuery } from './events'; -import { fakeEventIndexPattern } from './children.test'; +import { PaginationBuilder } from '../utils/pagination'; import { legacyEventIndexPattern } from './legacy_event_index_pattern'; -describe('related events query', () => { - it('generates the correct legacy queries', () => { - const timestamp = new Date().getTime(); - expect( - new EventsQuery(legacyEventIndexPattern, 'awesome-id', { - size: 1, - timestamp, - eventID: 'foo', - }).build('5') - ).toStrictEqual({ - body: { - query: { - bool: { - filter: [ - { - terms: { 'endgame.unique_pid': ['5'] }, - }, - { - term: { 'agent.id': 'awesome-id' }, - }, - { - term: { 'event.kind': 'event' }, - }, - { - bool: { - must_not: { - term: { 'event.category': 'process' }, - }, - }, - }, - ], - }, - }, - aggs: { - totals: { - terms: { - field: 'endgame.unique_pid', - size: 1, - }, - }, - }, - search_after: [timestamp, 'foo'], - size: 1, - sort: [{ '@timestamp': 'asc' }, { 'endgame.serial_event_id': 'asc' }], - }, - index: legacyEventIndexPattern, +describe('Events query', () => { + it('constructs a legacy multi search query', () => { + const query = new EventsQuery(new PaginationBuilder(1), 'index-pattern', 'endpointID'); + // using any here because otherwise ts complains that it doesn't know what bool and filter are + // eslint-disable-next-line @typescript-eslint/no-explicit-any + const msearch: any = query.buildMSearch('1234'); + expect(msearch[0].index).toBe(legacyEventIndexPattern); + expect(msearch[1].query.bool.filter[0]).toStrictEqual({ + terms: { 'endgame.unique_pid': ['1234'] }, }); }); - it('generates the correct non-legacy queries', () => { - const timestamp = new Date().getTime(); - - expect( - new EventsQuery(fakeEventIndexPattern, undefined, { - size: 1, - timestamp, - eventID: 'bar', - }).build('baz') - ).toStrictEqual({ - body: { - query: { - bool: { - filter: [ - { - terms: { 'process.entity_id': ['baz'] }, - }, - { - term: { 'event.kind': 'event' }, - }, - { - bool: { - must_not: { - term: { 'event.category': 'process' }, - }, - }, - }, - ], - }, - }, - aggs: { - totals: { - terms: { - field: 'process.entity_id', - size: 1, - }, - }, - }, - search_after: [timestamp, 'bar'], - size: 1, - sort: [{ '@timestamp': 'asc' }, { 'event.id': 'asc' }], - }, - index: fakeEventIndexPattern, + it('constructs a non-legacy multi search query', () => { + const query = new EventsQuery(new PaginationBuilder(1), 'index-pattern'); + // using any here because otherwise ts complains that it doesn't know what bool and filter are + // eslint-disable-next-line @typescript-eslint/no-explicit-any + const msearch: any = query.buildMSearch(['1234', '5678']); + expect(msearch[0].index).toBe('index-pattern'); + expect(msearch[1].query.bool.filter[0]).toStrictEqual({ + terms: { 'process.entity_id': ['1234', '5678'] }, }); }); }); diff --git a/x-pack/plugins/siem/server/endpoint/routes/resolver/queries/events.ts b/x-pack/plugins/siem/server/endpoint/routes/resolver/queries/events.ts index 80c3a0e9acccc..e14b222500d7c 100644 --- a/x-pack/plugins/siem/server/endpoint/routes/resolver/queries/events.ts +++ b/x-pack/plugins/siem/server/endpoint/routes/resolver/queries/events.ts @@ -3,66 +3,85 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. 
*/ +import { SearchResponse } from 'elasticsearch'; +import { ResolverEvent } from '../../../../../common/endpoint/types'; import { ResolverQuery } from './base'; +import { PaginationBuilder, PaginatedResults } from '../utils/pagination'; import { JsonObject } from '../../../../../../../../src/plugins/kibana_utils/public'; -export class EventsQuery extends ResolverQuery { - protected legacyQuery(endpointID: string, uniquePIDs: string[], index: string): JsonObject { - const paginator = this.paginateBy('endgame.serial_event_id', 'endgame.unique_pid'); +/** + * Builds a query for retrieving related events for a node. + */ +export class EventsQuery extends ResolverQuery { + constructor( + private readonly pagination: PaginationBuilder, + indexPattern: string, + endpointID?: string + ) { + super(indexPattern, endpointID); + } + + protected legacyQuery(endpointID: string, uniquePIDs: string[]): JsonObject { return { - body: paginator({ - query: { - bool: { - filter: [ - { - terms: { 'endgame.unique_pid': uniquePIDs }, - }, - { - term: { 'agent.id': endpointID }, - }, - { - term: { 'event.kind': 'event' }, - }, - { - bool: { - must_not: { - term: { 'event.category': 'process' }, - }, + query: { + bool: { + filter: [ + { + terms: { 'endgame.unique_pid': uniquePIDs }, + }, + { + term: { 'agent.id': endpointID }, + }, + { + term: { 'event.kind': 'event' }, + }, + { + bool: { + must_not: { + term: { 'event.category': 'process' }, }, }, - ], - }, + }, + ], }, - }), - index, + }, + ...this.pagination.buildQueryFields( + uniquePIDs.length, + 'endgame.serial_event_id', + 'endgame.unique_pid' + ), }; } - protected query(entityIDs: string[], index: string): JsonObject { - const paginator = this.paginateBy('event.id', 'process.entity_id'); + protected query(entityIDs: string[]): JsonObject { return { - body: paginator({ - query: { - bool: { - filter: [ - { - terms: { 'process.entity_id': entityIDs }, - }, - { - term: { 'event.kind': 'event' }, - }, - { - bool: { - must_not: { - term: { 'event.category': 'process' }, - }, + query: { + bool: { + filter: [ + { + terms: { 'process.entity_id': entityIDs }, + }, + { + term: { 'event.kind': 'event' }, + }, + { + bool: { + must_not: { + term: { 'event.category': 'process' }, }, }, - ], - }, + }, + ], }, - }), - index, + }, + ...this.pagination.buildQueryFields(entityIDs.length, 'event.id', 'process.entity_id'), + }; + } + + formatResponse(response: SearchResponse): PaginatedResults { + return { + results: ResolverQuery.getResults(response), + totals: PaginationBuilder.getTotals(response.aggregations), }; } } diff --git a/x-pack/plugins/siem/server/endpoint/routes/resolver/queries/lifecycle.test.ts b/x-pack/plugins/siem/server/endpoint/routes/resolver/queries/lifecycle.test.ts index 296135af83b72..f0a7f3bfa59c2 100644 --- a/x-pack/plugins/siem/server/endpoint/routes/resolver/queries/lifecycle.test.ts +++ b/x-pack/plugins/siem/server/endpoint/routes/resolver/queries/lifecycle.test.ts @@ -3,62 +3,29 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. 
*/ - import { LifecycleQuery } from './lifecycle'; -import { fakeEventIndexPattern } from './children.test'; import { legacyEventIndexPattern } from './legacy_event_index_pattern'; -describe('lifecycle query', () => { - it('generates the correct legacy queries', () => { - expect(new LifecycleQuery(legacyEventIndexPattern, 'awesome-id').build('5')).toStrictEqual({ - body: { - query: { - bool: { - filter: [ - { - terms: { 'endgame.unique_pid': ['5'] }, - }, - { - term: { 'agent.id': 'awesome-id' }, - }, - { - term: { 'event.kind': 'event' }, - }, - { - term: { 'event.category': 'process' }, - }, - ], - }, - }, - size: 10000, - sort: [{ '@timestamp': 'asc' }], - }, - index: legacyEventIndexPattern, +describe('Lifecycle query', () => { + it('constructs a legacy multi search query', () => { + const query = new LifecycleQuery('index-pattern', 'endpointID'); + // using any here because otherwise ts complains that it doesn't know what bool and filter are + // eslint-disable-next-line @typescript-eslint/no-explicit-any + const msearch: any = query.buildMSearch('1234'); + expect(msearch[0].index).toBe(legacyEventIndexPattern); + expect(msearch[1].query.bool.filter[0]).toStrictEqual({ + terms: { 'endgame.unique_pid': ['1234'] }, }); }); - it('generates the correct non-legacy queries', () => { - expect(new LifecycleQuery(fakeEventIndexPattern).build('baz')).toStrictEqual({ - body: { - query: { - bool: { - filter: [ - { - terms: { 'process.entity_id': ['baz'] }, - }, - { - term: { 'event.kind': 'event' }, - }, - { - term: { 'event.category': 'process' }, - }, - ], - }, - }, - size: 10000, - sort: [{ '@timestamp': 'asc' }], - }, - index: fakeEventIndexPattern, + it('constructs a non-legacy multi search query', () => { + const query = new LifecycleQuery('index-pattern'); + // using any here because otherwise ts complains that it doesn't know what bool and filter are + // eslint-disable-next-line @typescript-eslint/no-explicit-any + const msearch: any = query.buildMSearch(['1234', '5678']); + expect(msearch[0].index).toBe('index-pattern'); + expect(msearch[1].query.bool.filter[0]).toStrictEqual({ + terms: { 'process.entity_id': ['1234', '5678'] }, }); }); }); diff --git a/x-pack/plugins/siem/server/endpoint/routes/resolver/queries/lifecycle.ts b/x-pack/plugins/siem/server/endpoint/routes/resolver/queries/lifecycle.ts index 7dbbdec2fdfcd..74fe44f39615c 100644 --- a/x-pack/plugins/siem/server/endpoint/routes/resolver/queries/lifecycle.ts +++ b/x-pack/plugins/siem/server/endpoint/routes/resolver/queries/lifecycle.ts @@ -3,60 +3,63 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ +import { SearchResponse } from 'elasticsearch'; import { ResolverQuery } from './base'; import { JsonObject } from '../../../../../../../../src/plugins/kibana_utils/public'; +import { ResolverEvent } from '../../../../../common/endpoint/types'; -export class LifecycleQuery extends ResolverQuery { - protected legacyQuery(endpointID: string, uniquePIDs: string[], index: string): JsonObject { +/** + * Builds a query for retrieving life cycle information about a node (start, stop, etc). 
+ */ +export class LifecycleQuery extends ResolverQuery { + protected legacyQuery(endpointID: string, uniquePIDs: string[]): JsonObject { return { - body: { - query: { - bool: { - filter: [ - { - terms: { 'endgame.unique_pid': uniquePIDs }, - }, - { - term: { 'agent.id': endpointID }, - }, - { - term: { 'event.kind': 'event' }, - }, - { - term: { 'event.category': 'process' }, - }, - ], - }, + query: { + bool: { + filter: [ + { + terms: { 'endgame.unique_pid': uniquePIDs }, + }, + { + term: { 'agent.id': endpointID }, + }, + { + term: { 'event.kind': 'event' }, + }, + { + term: { 'event.category': 'process' }, + }, + ], }, - size: 10000, - sort: [{ '@timestamp': 'asc' }], }, - index, + size: 10000, + sort: [{ '@timestamp': 'asc' }], }; } - protected query(entityIDs: string[], index: string): JsonObject { + protected query(entityIDs: string[]): JsonObject { return { - body: { - query: { - bool: { - filter: [ - { - terms: { 'process.entity_id': entityIDs }, - }, - { - term: { 'event.kind': 'event' }, - }, - { - term: { 'event.category': 'process' }, - }, - ], - }, + query: { + bool: { + filter: [ + { + terms: { 'process.entity_id': entityIDs }, + }, + { + term: { 'event.kind': 'event' }, + }, + { + term: { 'event.category': 'process' }, + }, + ], }, - size: 10000, - sort: [{ '@timestamp': 'asc' }], }, - index, + size: 10000, + sort: [{ '@timestamp': 'asc' }], }; } + + formatResponse(response: SearchResponse): ResolverEvent[] { + return ResolverQuery.getResults(response); + } } diff --git a/x-pack/plugins/siem/server/endpoint/routes/resolver/queries/multi_searcher.ts b/x-pack/plugins/siem/server/endpoint/routes/resolver/queries/multi_searcher.ts new file mode 100644 index 0000000000000..7f55fafeafb59 --- /dev/null +++ b/x-pack/plugins/siem/server/endpoint/routes/resolver/queries/multi_searcher.ts @@ -0,0 +1,77 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +import { IScopedClusterClient } from 'kibana/server'; +import { MSearchResponse } from 'elasticsearch'; +import { ResolverEvent } from '../../../../../common/endpoint/types'; +import { JsonObject } from '../../../../../../../../src/plugins/kibana_utils/public'; + +/** + * Contract for queries to be compatible with ES multi search api + */ +export interface MSearchQuery { + /** + * Builds an array of header and body pairs for use in a multi search + * + * @param ids one or many unique identifiers for nodes. + * @returns an array of header and body pairs describing multi search queries + */ + buildMSearch(ids: string | string[]): JsonObject[]; +} + +/** + * Contract for adding a query for multi search + */ +export interface QueryInfo { + /** + * A multi search query + */ + query: MSearchQuery; + /** + * one or many unique identifiers to be searched for in this query + */ + ids: string | string[]; +} + +/** + * Executes a multi search within ES. 
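+ * Responses are positional: the nth entry in the msearch response answers the nth header/body pair that was submitted, which is why `search` checks the response count against the query count.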
+ * + * More info on multi search here: + * https://www.elastic.co/guide/en/elasticsearch/reference/current/search-multi-search.html + * https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/msearch_examples.html + */ +export class MultiSearcher { + constructor(private readonly client: IScopedClusterClient) {} + + /** + * Perform the multi search on the passed in queries + * + * @param queries multi search queries + * @returns an array of SearchResponse + */ + async search(queries: QueryInfo[]) { + if (queries.length === 0) { + throw new Error('No queries provided to MultiSearcher'); + } + + let searchQuery: JsonObject[] = []; + queries.forEach( + (info) => (searchQuery = [...searchQuery, ...info.query.buildMSearch(info.ids)]) + ); + const res: MSearchResponse = await this.client.callAsCurrentUser('msearch', { + body: searchQuery, + }); + + if (!res.responses) { + throw new Error('No response from Elasticsearch'); + } + + if (res.responses.length !== queries.length) { + throw new Error(`Responses length was: ${res.responses.length} expected ${queries.length}`); + } + return res.responses; + } +} diff --git a/x-pack/plugins/siem/server/endpoint/routes/resolver/queries/stats.test.ts b/x-pack/plugins/siem/server/endpoint/routes/resolver/queries/stats.test.ts index 17a158aec7cf5..b6e8c6cdc08de 100644 --- a/x-pack/plugins/siem/server/endpoint/routes/resolver/queries/stats.test.ts +++ b/x-pack/plugins/siem/server/endpoint/routes/resolver/queries/stats.test.ts @@ -3,186 +3,29 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -import { legacyEventIndexPattern } from './legacy_event_index_pattern'; import { StatsQuery } from './stats'; -import { fakeEventIndexPattern } from './children.test'; +import { legacyEventIndexPattern } from './legacy_event_index_pattern'; -describe('stats query', () => { - it('generates the correct legacy queries', () => { - expect(new StatsQuery(legacyEventIndexPattern, 'awesome-id').build('5')).toStrictEqual({ - body: { - size: 0, - query: { - bool: { - filter: [ - { - term: { - 'agent.id': 'awesome-id', - }, - }, - { - bool: { - should: [ - { - bool: { - filter: [ - { - term: { - 'event.kind': 'event', - }, - }, - { - terms: { - 'endgame.unique_pid': ['5'], - }, - }, - { - bool: { - must_not: { - term: { - 'event.category': 'process', - }, - }, - }, - }, - ], - }, - }, - { - bool: { - filter: [ - { - term: { - 'event.kind': 'alert', - }, - }, - { - terms: { - 'endgame.data.alert_details.acting_process.unique_pid': ['5'], - }, - }, - ], - }, - }, - ], - }, - }, - ], - }, - }, - aggs: { - alerts: { - filter: { - term: { - 'event.kind': 'alert', - }, - }, - aggs: { - ids: { - terms: { - field: 'endgame.data.alert_details.acting_process.unique_pid', - }, - }, - }, - }, - events: { - filter: { - term: { - 'event.kind': 'event', - }, - }, - aggs: { - ids: { - terms: { - field: 'endgame.unique_pid', - }, - }, - }, - }, - }, - }, - index: legacyEventIndexPattern, +describe('Stats query', () => { + it('constructs a legacy multi search query', () => { + const query = new StatsQuery('index-pattern', 'endpointID'); + // using any here because otherwise ts complains that it doesn't know what bool and filter are + // eslint-disable-next-line @typescript-eslint/no-explicit-any + const msearch: any = query.buildMSearch('1234'); + expect(msearch[0].index).toBe(legacyEventIndexPattern); + 
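+ // buildMSearch yields a [header, body] pair: element [0] is the index header, element [1] the query itself.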
expect(msearch[1].query.bool.filter[1].bool.should[1].bool.filter[1]).toStrictEqual({ + terms: { 'endgame.data.alert_details.acting_process.unique_pid': ['1234'] }, }); }); - it('generates the correct non-legacy queries', () => { - expect(new StatsQuery(fakeEventIndexPattern).build('baz')).toStrictEqual({ - body: { - size: 0, - query: { - bool: { - filter: [ - { - terms: { - 'process.entity_id': ['baz'], - }, - }, - { - bool: { - should: [ - { - bool: { - filter: [ - { - term: { - 'event.kind': 'event', - }, - }, - { - bool: { - must_not: { - term: { - 'event.category': 'process', - }, - }, - }, - }, - ], - }, - }, - { - term: { - 'event.kind': 'alert', - }, - }, - ], - }, - }, - ], - }, - }, - aggs: { - alerts: { - filter: { - term: { - 'event.kind': 'alert', - }, - }, - aggs: { - ids: { - terms: { - field: 'process.entity_id', - }, - }, - }, - }, - events: { - filter: { - term: { - 'event.kind': 'event', - }, - }, - aggs: { - ids: { - terms: { - field: 'process.entity_id', - }, - }, - }, - }, - }, - }, - index: fakeEventIndexPattern, + it('constructs a non-legacy multi search query', () => { + const query = new StatsQuery('index-pattern'); + // using any here because otherwise ts complains that it doesn't know what bool and filter are + // eslint-disable-next-line @typescript-eslint/no-explicit-any + const msearch: any = query.buildMSearch(['1234', '5678']); + expect(msearch[0].index).toBe('index-pattern'); + expect(msearch[1].query.bool.filter[0]).toStrictEqual({ + terms: { 'process.entity_id': ['1234', '5678'] }, }); }); }); diff --git a/x-pack/plugins/siem/server/endpoint/routes/resolver/queries/stats.ts b/x-pack/plugins/siem/server/endpoint/routes/resolver/queries/stats.ts index 5fddf86ea4a7c..a1bab707879a5 100644 --- a/x-pack/plugins/siem/server/endpoint/routes/resolver/queries/stats.ts +++ b/x-pack/plugins/siem/server/endpoint/routes/resolver/queries/stats.ts @@ -7,143 +7,142 @@ import { SearchResponse } from 'elasticsearch'; import { ResolverQuery } from './base'; import { ResolverEvent } from '../../../../../common/endpoint/types'; import { JsonObject } from '../../../../../../../../src/plugins/kibana_utils/public'; -import { PaginatedResults } from '../utils/pagination'; +import { AggBucket } from '../utils/pagination'; -export class StatsQuery extends ResolverQuery { - protected postSearch(response: SearchResponse): PaginatedResults { - const alerts = response.aggregations.alerts.ids.buckets.reduce( - // eslint-disable-next-line @typescript-eslint/no-explicit-any - (cummulative: any, bucket: any) => ({ ...cummulative, [bucket.key]: bucket.doc_count }), - {} - ); - const events = response.aggregations.events.ids.buckets.reduce( - // eslint-disable-next-line @typescript-eslint/no-explicit-any - (cummulative: any, bucket: any) => ({ ...cummulative, [bucket.key]: bucket.doc_count }), - {} - ); - return { - totals: {}, - results: [], - extras: { - alerts, - events, - }, - }; - } +export interface StatsResult { + alerts: Record; + events: Record; +} - protected legacyQuery(endpointID: string, uniquePIDs: string[], index: string): JsonObject { +export class StatsQuery extends ResolverQuery { + protected legacyQuery(endpointID: string, uniquePIDs: string[]): JsonObject { return { - body: { - size: 0, - query: { - bool: { - filter: [ - { - term: { 'agent.id': endpointID }, - }, - { - bool: { - should: [ - { - bool: { - filter: [ - { term: { 'event.kind': 'event' } }, - { terms: { 'endgame.unique_pid': uniquePIDs } }, - { - bool: { - must_not: { - term: { 'event.category': 'process' }, - 
}, + size: 0, + query: { + bool: { + filter: [ + { + term: { 'agent.id': endpointID }, + }, + { + bool: { + should: [ + { + bool: { + filter: [ + { term: { 'event.kind': 'event' } }, + { terms: { 'endgame.unique_pid': uniquePIDs } }, + { + bool: { + must_not: { + term: { 'event.category': 'process' }, }, }, - ], - }, + }, + ], }, - { - bool: { - filter: [ - { term: { 'event.kind': 'alert' } }, - { - terms: { - 'endgame.data.alert_details.acting_process.unique_pid': uniquePIDs, - }, + }, + { + bool: { + filter: [ + { term: { 'event.kind': 'alert' } }, + { + terms: { + 'endgame.data.alert_details.acting_process.unique_pid': uniquePIDs, }, - ], - }, + }, + ], }, - ], - }, + }, + ], }, - ], - }, - }, - aggs: { - alerts: { - filter: { term: { 'event.kind': 'alert' } }, - aggs: { - ids: { terms: { field: 'endgame.data.alert_details.acting_process.unique_pid' } }, }, + ], + }, + }, + aggs: { + alerts: { + filter: { term: { 'event.kind': 'alert' } }, + aggs: { + ids: { terms: { field: 'endgame.data.alert_details.acting_process.unique_pid' } }, }, - events: { - filter: { term: { 'event.kind': 'event' } }, - aggs: { - ids: { terms: { field: 'endgame.unique_pid' } }, - }, + }, + events: { + filter: { term: { 'event.kind': 'event' } }, + aggs: { + ids: { terms: { field: 'endgame.unique_pid' } }, }, }, }, - index, }; } - protected query(entityIDs: string[], index: string): JsonObject { + protected query(entityIDs: string[]): JsonObject { return { - body: { - size: 0, - query: { - bool: { - filter: [ - { terms: { 'process.entity_id': entityIDs } }, - { - bool: { - should: [ - { - bool: { - filter: [ - { term: { 'event.kind': 'event' } }, - { - bool: { - must_not: { - term: { 'event.category': 'process' }, - }, + size: 0, + query: { + bool: { + filter: [ + { terms: { 'process.entity_id': entityIDs } }, + { + bool: { + should: [ + { + bool: { + filter: [ + { term: { 'event.kind': 'event' } }, + { + bool: { + must_not: { + term: { 'event.category': 'process' }, }, }, - ], - }, + }, + ], }, - { term: { 'event.kind': 'alert' } }, - ], - }, + }, + { term: { 'event.kind': 'alert' } }, + ], }, - ], - }, - }, - aggs: { - alerts: { - filter: { term: { 'event.kind': 'alert' } }, - aggs: { - ids: { terms: { field: 'process.entity_id' } }, }, + ], + }, + }, + aggs: { + alerts: { + filter: { term: { 'event.kind': 'alert' } }, + aggs: { + ids: { terms: { field: 'process.entity_id' } }, }, - events: { - filter: { term: { 'event.kind': 'event' } }, - aggs: { - ids: { terms: { field: 'process.entity_id' } }, - }, + }, + events: { + filter: { term: { 'event.kind': 'event' } }, + aggs: { + ids: { terms: { field: 'process.entity_id' } }, }, }, }, - index, + }; + } + + public formatResponse(response: SearchResponse): StatsResult { + const alerts = response.aggregations.alerts.ids.buckets.reduce( + (cummulative: Record, bucket: AggBucket) => ({ + ...cummulative, + [bucket.key]: bucket.doc_count, + }), + {} + ); + const events = response.aggregations.events.ids.buckets.reduce( + (cummulative: Record, bucket: AggBucket) => ({ + ...cummulative, + [bucket.key]: bucket.doc_count, + }), + {} + ); + return { + alerts, + events, }; } } diff --git a/x-pack/plugins/siem/server/endpoint/routes/resolver/tree.ts b/x-pack/plugins/siem/server/endpoint/routes/resolver/tree.ts index 3551123393960..b0c8b4411991e 100644 --- a/x-pack/plugins/siem/server/endpoint/routes/resolver/tree.ts +++ b/x-pack/plugins/siem/server/endpoint/routes/resolver/tree.ts @@ -34,11 +34,14 @@ export function handleTree( const indexPattern = await 
indexRetriever.getEventIndexPattern(context); const fetcher = new Fetcher(client, id, indexPattern, endpointID); - const tree = await Tree.merge( + + const [childrenNodes, ancestry, relatedEvents] = await Promise.all([ fetcher.children(children, generations, afterChild), - fetcher.ancestors(ancestors + 1), - fetcher.events(events, afterEvent) - ); + fetcher.ancestors(ancestors), + fetcher.events(events, afterEvent), + ]); + + const tree = new Tree(id, { ancestry, children: childrenNodes, relatedEvents }); const enrichedTree = await fetcher.stats(tree); diff --git a/x-pack/plugins/siem/server/endpoint/routes/resolver/utils/children_helper.test.ts b/x-pack/plugins/siem/server/endpoint/routes/resolver/utils/children_helper.test.ts new file mode 100644 index 0000000000000..11f3dd69b3f95 --- /dev/null +++ b/x-pack/plugins/siem/server/endpoint/routes/resolver/utils/children_helper.test.ts @@ -0,0 +1,81 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +import _ from 'lodash'; + +import { EndpointDocGenerator } from '../../../../../common/endpoint/generate_data'; +import { ChildrenNodesHelper } from './children_helper'; +import { eventId, entityId, parentEntityId } from '../../../../../common/endpoint/models/event'; +import { ResolverEvent, ResolverChildren } from '../../../../../common/endpoint/types'; + +function findParents(events: ResolverEvent[]): ResolverEvent[] { + const cache = _.groupBy(events, entityId); + + const parents: ResolverEvent[] = []; + Object.values(cache).forEach((lifecycle) => { + const parentNode = cache[parentEntityId(lifecycle[0])!]; + if (parentNode) { + parents.push(parentNode[0]); + } + }); + return parents; +} + +function findNode(tree: ResolverChildren, id: string) { + return tree.childNodes.find((node) => { + return node.entityID === id; + }); +} + +describe('Children helper', () => { + const generator = new EndpointDocGenerator(); + const root = generator.generateEvent(); + + it('builds the children response structure', () => { + const children = Array.from(generator.descendantsTreeGenerator(root, 3, 3, 0, 0, 100, true)); + + // because we requested the generator to always return the max children, there will always be at least 2 parents + const parents = findParents(children); + + // this represents the aggregation returned from elastic search + // each node in the tree should have 3 children, so if these values are greater than 3 there should be + // pagination cursors created for those children + const totals = { + [root.process.entity_id]: 100, + [entityId(parents[0])]: 10, + [entityId(parents[1])]: 0, + }; + + const helper = new ChildrenNodesHelper(root.process.entity_id); + helper.addChildren(totals, children); + const tree = helper.getNodes(); + expect(tree.nextChild).not.toBeNull(); + + let parent = findNode(tree, entityId(parents[0])); + expect(parent?.nextChild).not.toBeNull(); + parent = findNode(tree, entityId(parents[1])); + expect(parent?.nextChild).toBeNull(); + + tree.childNodes.forEach((node) => { + node.lifecycle.forEach((event) => { + expect(children.find((child) => child.event.id === eventId(event))).toEqual(event); + }); + }); + }); + + it('builds the children response structure twice', () => { + const children = Array.from(generator.descendantsTreeGenerator(root, 3, 3, 0, 0, 100)); + const helper = new ChildrenNodesHelper(root.process.entity_id); 
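+ // Passing an empty totals map means no pagination cursors will be built for any parent.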
+ helper.addChildren({}, children); + helper.getNodes(); + + const tree = helper.getNodes(); + tree.childNodes.forEach((node) => { + node.lifecycle.forEach((event) => { + expect(children.find((child) => child.event.id === eventId(event))).toEqual(event); + }); + }); + }); +}); diff --git a/x-pack/plugins/siem/server/endpoint/routes/resolver/utils/children_helper.ts b/x-pack/plugins/siem/server/endpoint/routes/resolver/utils/children_helper.ts new file mode 100644 index 0000000000000..7a3e1fc591e82 --- /dev/null +++ b/x-pack/plugins/siem/server/endpoint/routes/resolver/utils/children_helper.ts @@ -0,0 +1,93 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +import { + entityId, + parentEntityId, + isProcessStart, +} from '../../../../../common/endpoint/models/event'; +import { ChildNode, ResolverEvent, ResolverChildren } from '../../../../../common/endpoint/types'; +import { PaginationBuilder } from './pagination'; +import { createChild } from './node'; + +/** + * This class helps construct the children structure when building a resolver tree. + */ +export class ChildrenNodesHelper { + private readonly cache: Map<string, ChildNode> = new Map(); + + constructor(private readonly rootID: string) { + this.cache.set(rootID, createChild(rootID)); + } + + /** + * Constructs a ResolverChildren response based on the children that were previously added. + */ + getNodes(): ResolverChildren { + const cacheCopy: Map<string, ChildNode> = new Map(this.cache); + const rootNode = cacheCopy.get(this.rootID); + let rootNextChild = null; + + if (rootNode) { + rootNextChild = rootNode.nextChild; + } + + cacheCopy.delete(this.rootID); + return { + childNodes: Array.from(cacheCopy.values()), + nextChild: rootNextChild, + }; + } + + /** + * Add children to the cache. 
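+ * For example, if the totals map reports 10 children for a parent but only 3 start events came back, the parent node receives a nextChild cursor so a later query can page onward.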
+ * + * @param totals a map of unique node IDs to total number of child nodes + * @param results events from a children query + */ + addChildren(totals: Record<string, number>, results: ResolverEvent[]) { + const startEventsCache: Map<string, ResolverEvent[]> = new Map(); + + results.forEach((event) => { + const entityID = entityId(event); + const parentID = parentEntityId(event); + if (!entityID || !parentID) { + return; + } + + let cachedChild = this.cache.get(entityID); + if (!cachedChild) { + cachedChild = createChild(entityID); + this.cache.set(entityID, cachedChild); + } + cachedChild.lifecycle.push(event); + + if (isProcessStart(event)) { + let startEvents = startEventsCache.get(parentID); + if (startEvents === undefined) { + startEvents = []; + startEventsCache.set(parentID, startEvents); + } + startEvents.push(event); + } + }); + + this.addChildrenPagination(startEventsCache, totals); + } + + private addChildrenPagination( + startEventsCache: Map<string, ResolverEvent[]>, + totals: Record<string, number> + ) { + Object.entries(totals).forEach(([parentID, total]) => { + const parentNode = this.cache.get(parentID); + const childrenStartEvents = startEventsCache.get(parentID); + if (parentNode && childrenStartEvents) { + parentNode.nextChild = PaginationBuilder.buildCursor(total, childrenStartEvents); + } + }); + } +} diff --git a/x-pack/plugins/siem/server/endpoint/routes/resolver/utils/fetch.ts b/x-pack/plugins/siem/server/endpoint/routes/resolver/utils/fetch.ts index c7d7713082df0..4b14c555d49b7 100644 --- a/x-pack/plugins/siem/server/endpoint/routes/resolver/utils/fetch.ts +++ b/x-pack/plugins/siem/server/endpoint/routes/resolver/utils/fetch.ts @@ -5,117 +5,176 @@ */ import { IScopedClusterClient } from 'kibana/server'; +import { + ResolverChildren, + ResolverRelatedEvents, + ResolverAncestry, +} from '../../../../../common/endpoint/types'; import { entityId, parentEntityId } from '../../../../../common/endpoint/models/event'; -import { getPaginationParams } from './pagination'; +import { PaginationBuilder } from './pagination'; import { Tree } from './tree'; import { LifecycleQuery } from '../queries/lifecycle'; import { ChildrenQuery } from '../queries/children'; import { EventsQuery } from '../queries/events'; import { StatsQuery } from '../queries/stats'; +import { createAncestry, createRelatedEvents, createLifecycle } from './node'; +import { ChildrenNodesHelper } from './children_helper'; +/** + * Handles retrieving nodes of a resolver tree. + */ export class Fetcher { constructor( private readonly client: IScopedClusterClient, + /** + * The anchoring origin for the tree. + */ private readonly id: string, + /** + * Index pattern for searching ES + */ private readonly indexPattern: string, + /** + * This is used for searching legacy events + */ private readonly endpointID?: string ) {} - public async ancestors(limit: number): Promise<Tree> { - const tree = new Tree(this.id); - await this.doAncestors(tree, this.id, this.id, limit); - return tree; + /** + * Retrieves the ancestor nodes for the resolver tree. + * + * @param limit upper limit of ancestors to retrieve + */ + public async ancestors(limit: number): Promise<ResolverAncestry> { + const root = createAncestry(); + await this.doAncestors(this.id, limit + 1, root); + return root; } - public async children(limit: number, generations: number, after?: string): Promise<Tree> { - const tree = new Tree(this.id); - await this.doChildren(tree, [this.id], limit, generations, after); - return tree; + /** + * Retrieves the children nodes for the resolver tree. 
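+ * Traversal is breadth first: the entity_ids of one level's children seed the query for the next level.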
+ * + * @param limit the number of children to retrieve for a single level + * @param generations number of levels to return + * @param after a cursor to use as the starting point for retrieving children + */ + public async children( + limit: number, + generations: number, + after?: string + ): Promise { + const helper = new ChildrenNodesHelper(this.id); + + await this.doChildren(helper, [this.id], limit, generations, after); + + return helper.getNodes(); } - public async events(limit: number, after?: string): Promise { - const tree = new Tree(this.id); - await this.doEvents(tree, limit, after); - return tree; + /** + * Retrieves the related events for the origin node. + * + * @param limit the upper bound number of related events to return + * @param after a cursor to use as the starting point for retrieving related events + */ + public async events(limit: number, after?: string): Promise { + return this.doEvents(limit, after); } + /** + * Enriches a resolver tree with statistics for how many related events and alerts exist for each node in the tree. + * + * @param tree a resolver tree to enrich with statistical information. + */ public async stats(tree: Tree): Promise { await this.doStats(tree); return tree; } - private async doAncestors(tree: Tree, curNode: string, previousNode: string, levels: number) { + private async doAncestors( + curNodeID: string, + levels: number, + ancestorInfo: ResolverAncestry + ): Promise { if (levels === 0) { - tree.setNextAncestor(curNode); + ancestorInfo.nextAncestor = curNodeID; return; } const query = new LifecycleQuery(this.indexPattern, this.endpointID); - const { results } = await query.search(this.client, curNode); + const results = await query.search(this.client, curNodeID); if (results.length === 0) { - tree.setNextAncestor(null); return; } - tree.addAncestor(previousNode, ...results); + ancestorInfo.ancestors.push(createLifecycle(curNodeID, results)); const next = parentEntityId(results[0]); - if (next !== undefined) { - await this.doAncestors(tree, next, curNode, levels - 1); + if (next === undefined) { + return; } + await this.doAncestors(next, levels - 1, ancestorInfo); } - private async doEvents(tree: Tree, limit: number, after?: string) { + private async doEvents(limit: number, after?: string) { const query = new EventsQuery( + PaginationBuilder.createBuilder(limit, after), this.indexPattern, - this.endpointID, - getPaginationParams(limit, after) + this.endpointID ); const { totals, results } = await query.search(this.client, this.id); - tree.addEvent(...results); - tree.paginateEvents(totals, results); - if (results.length === 0) tree.setNextEvent(null); + if (results.length === 0) { + // return an empty set of results + return createRelatedEvents(this.id); + } + if (!totals[this.id]) { + throw new Error(`Could not find the totals for related events entity_id: ${this.id}`); + } + + return createRelatedEvents( + this.id, + results, + PaginationBuilder.buildCursor(totals[this.id], results) + ); } private async doChildren( - tree: Tree, + cache: ChildrenNodesHelper, ids: string[], limit: number, levels: number, after?: string ) { - if (levels === 0 || ids.length === 0) return; + if (levels === 0 || ids.length === 0) { + return; + } const childrenQuery = new ChildrenQuery( + PaginationBuilder.createBuilder(limit, after), this.indexPattern, - this.endpointID, - getPaginationParams(limit, after) + this.endpointID ); const lifecycleQuery = new LifecycleQuery(this.indexPattern, this.endpointID); - const { totals, results } = await 
childrenQuery.search(this.client, ...ids); + const { totals, results } = await childrenQuery.search(this.client, ids); if (results.length === 0) { - tree.markLeafNode(...ids); return; } const childIDs = results.map(entityId); - const children = (await lifecycleQuery.search(this.client, ...childIDs)).results; + const children = await lifecycleQuery.search(this.client, childIDs); - tree.addChild(...children); - tree.paginateChildren(totals, results); - tree.markLeafNode(...childIDs); + cache.addChildren(totals, children); - await this.doChildren(tree, childIDs, limit * limit, levels - 1); + await this.doChildren(cache, childIDs, limit, levels - 1); } private async doStats(tree: Tree) { const statsQuery = new StatsQuery(this.indexPattern, this.endpointID); const ids = tree.ids(); - const { extras } = await statsQuery.search(this.client, ...ids); - const alerts = extras?.alerts || {}; - const events = extras?.events || {}; + const res = await statsQuery.search(this.client, ids); + const alerts = res?.alerts || {}; + const events = res?.events || {}; ids.forEach((id) => { tree.addStats(id, { totalAlerts: alerts[id] || 0, totalEvents: events[id] || 0 }); }); diff --git a/x-pack/plugins/siem/server/endpoint/routes/resolver/utils/node.ts b/x-pack/plugins/siem/server/endpoint/routes/resolver/utils/node.ts new file mode 100644 index 0000000000000..ae078b5368a96 --- /dev/null +++ b/x-pack/plugins/siem/server/endpoint/routes/resolver/utils/node.ts @@ -0,0 +1,87 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +import { + ResolverEvent, + ResolverAncestry, + LifecycleNode, + ResolverRelatedEvents, + ResolverTree, + ChildNode, +} from '../../../../../common/endpoint/types'; + +/** + * Creates a related event object that the related events handler would return + * + * @param entityID the entity_id for these related events + * @param events array of related events + * @param nextEvent the cursor to retrieve the next related event + */ +export function createRelatedEvents( + entityID: string, + events: ResolverEvent[] = [], + nextEvent: string | null = null +): ResolverRelatedEvents { + return { entityID, events, nextEvent }; +} + +/** + * Creates a child node that would be used in the child handler response + * + * @param entityID the entity_id of the child + */ +export function createChild(entityID: string): ChildNode { + const lifecycle = createLifecycle(entityID, []); + return { + ...lifecycle, + nextChild: null, + }; +} + +/** + * Creates an empty ancestry response structure. 
+ */ +export function createAncestry(): ResolverAncestry { + return { ancestors: [], nextAncestor: null }; +} + +/** + * Creates a lifecycle node for use in the ancestry or child handlers + * + * @param id the entity_id that these lifecycle nodes should have + * @param lifecycle an array of lifecycle events + */ +export function createLifecycle(entityID: string, lifecycle: ResolverEvent[]): LifecycleNode { + return { entityID, lifecycle }; +} + +/** + * Creates an empty `Tree` response structure that the tree handler would return + * + * @param entityID the entity_id of the tree's origin node + */ +export function createTree(entityID: string): ResolverTree { + return { + entityID, + children: { + childNodes: [], + nextChild: null, + }, + relatedEvents: { + events: [], + nextEvent: null, + }, + lifecycle: [], + ancestry: { + ancestors: [], + nextAncestor: null, + }, + stats: { + totalAlerts: 0, + totalEvents: 0, + }, + }; +} diff --git a/x-pack/plugins/siem/server/endpoint/routes/resolver/utils/pagination.test.ts b/x-pack/plugins/siem/server/endpoint/routes/resolver/utils/pagination.test.ts new file mode 100644 index 0000000000000..74e4e252861e6 --- /dev/null +++ b/x-pack/plugins/siem/server/endpoint/routes/resolver/utils/pagination.test.ts @@ -0,0 +1,68 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +import { PaginationBuilder } from './pagination'; +import { EndpointDocGenerator } from '../../../../../common/endpoint/generate_data'; +import { EndpointEvent } from '../../../../../common/endpoint/types'; + +describe('Pagination', () => { + const generator = new EndpointDocGenerator(); + + const getSearchAfterInfo = (events: EndpointEvent[]) => { + const lastEvent = events[events.length - 1]; + return [lastEvent['@timestamp'], lastEvent.event.id]; + }; + describe('cursor', () => { + const root = generator.generateEvent(); + const events = Array.from(generator.relatedEventsGenerator(root, 5)); + + it('does not build a cursor when all events are present', () => { + expect(PaginationBuilder.buildCursor(0, events)).toBeNull(); + }); + + it('creates a cursor when not all events are present', () => { + expect(PaginationBuilder.buildCursor(events.length + 1, events)).not.toBeNull(); + }); + + it('creates a cursor with the right information', () => { + const cursor = PaginationBuilder.buildCursor(events.length + 1, events); + expect(cursor).not.toBeNull(); + // we are guaranteed that the cursor won't be null from the check above + const builder = PaginationBuilder.createBuilder(0, cursor!); + const fields = builder.buildQueryFields(0, '', ''); + expect(fields.search_after).toStrictEqual(getSearchAfterInfo(events)); + }); + }); + + describe('pagination builder', () => { + it('does not include the search after information when no cursor is provided', () => { + const builder = PaginationBuilder.createBuilder(100); + const fields = builder.buildQueryFields(1, '', ''); + expect(fields).not.toHaveProperty('search_after'); + }); + + it('returns no results when the aggregation does not exist in the response', () => { + expect(PaginationBuilder.getTotals()).toStrictEqual({}); + }); + + it('constructs the totals from the aggregation results', () => { + const agg = { + totals: { + buckets: [ + { + key: 'awesome', + doc_count: 5, + }, + { + key: 'soup', + doc_count: 1, + }, + ], + }, + }; + 
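+ // getTotals flattens each bucket into a totals entry of the form { [bucket.key]: bucket.doc_count }.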
expect(PaginationBuilder.getTotals(agg)).toStrictEqual({ awesome: 5, soup: 1 }); + }); + }); +}); diff --git a/x-pack/plugins/siem/server/endpoint/routes/resolver/utils/pagination.ts b/x-pack/plugins/siem/server/endpoint/routes/resolver/utils/pagination.ts index a553679a4e653..9a852d47e0e85 100644 --- a/x-pack/plugins/siem/server/endpoint/routes/resolver/utils/pagination.ts +++ b/x-pack/plugins/siem/server/endpoint/routes/resolver/utils/pagination.ts @@ -4,22 +4,22 @@ * you may not use this file except in compliance with the Elastic License. */ -import { SearchResponse } from 'elasticsearch'; import { ResolverEvent } from '../../../../../common/endpoint/types'; -import { entityId } from '../../../../../common/endpoint/models/event'; +import { eventId } from '../../../../../common/endpoint/models/event'; import { JsonObject } from '../../../../../../../../src/plugins/kibana_utils/public'; -export interface PaginationParams { - size: number; - timestamp?: number; - eventID?: string; +/** + * Represents a single result bucket of an aggregation + */ +export interface AggBucket { + key: string; + doc_count: number; } -export interface PaginatedResults { - totals: Record<string, number>; - results: ResolverEvent[]; - // content holder for any other extra aggregation counts - extras?: Record<string, Record<string, number>>; +interface TotalsAggregation { + totals?: { + buckets?: AggBucket[]; + }; } interface PaginationCursor { @@ -27,85 +27,142 @@ eventID: string; } -function urlEncodeCursor(data: PaginationCursor): string { - const value = JSON.stringify(data); - return Buffer.from(value, 'utf8') - .toString('base64') - .replace(/\+/g, '-') - .replace(/\//g, '_') - .replace(/=+$/g, ''); +/** + * The result structure of a query that leverages pagination. This includes totals that can be used to determine if + * additional nodes exist and additional queries need to be made to retrieve the nodes. + */ +export interface PaginatedResults { + /** + * Resulting events returned from the query. + */ + results: ResolverEvent[]; + /** + * Mapping of unique ID to total number of events that exist in ES. The events this references are scoped to the events + * that the query is searching for. + */ + totals: Record<string, number>; } -function urlDecodeCursor(value: string): PaginationCursor { - const localValue = value.replace(/\-/g, '+').replace(/_/g, '/'); - const data = Buffer.from(localValue, 'base64').toString('utf8'); - const { timestamp, eventID } = JSON.parse(data); - // take some extra care to only grab the things we want - // convert the timestamp string to date object - return { timestamp, eventID }; -} +/** + * This class handles constructing pagination cursors that resolver can use to return additional events in subsequent + * queries. It also constructs an aggregation query to determine the totals for other queries. This class should be used + * with a query to build cursors for paginated results. + */ +export class PaginationBuilder { + constructor( + /** + * upper limit of how many results should be returned by the parent query. 
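+ * (judging by the route validation in the tests below, this is typically the validated `events` or `children` query parameter)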
+ */ + private readonly size: number, + /** + * timestamp that will be used in the search_after section + */ + private readonly timestamp?: number, + /** + * unique ID for the last event + */ + private readonly eventID?: string + ) {} -export function getPaginationParams(limit: number, after?: string): PaginationParams { - if (after) { - try { - const cursor = urlDecodeCursor(after); - if (cursor.timestamp && cursor.eventID) { - return { - size: limit, - timestamp: cursor.timestamp, - eventID: cursor.eventID, - }; - } - } catch (err) { - /* tslint:disable:no-empty */ - } // ignore invalid cursor values + private static urlEncodeCursor(data: PaginationCursor): string { + const value = JSON.stringify(data); + return Buffer.from(value, 'utf8') + .toString('base64') + .replace(/\+/g, '-') + .replace(/\//g, '_') + .replace(/=+$/g, ''); } - return { size: limit }; -} -export function paginate( - pagination: PaginationParams, - tiebreaker: string, - aggregator: string, - query: JsonObject -): JsonObject { - const { size, timestamp, eventID } = pagination; - query.sort = [{ '@timestamp': 'asc' }, { [tiebreaker]: 'asc' }]; - query.aggs = query.aggs || {}; - query.aggs = { - ...(typeof query.aggs === 'object' ? query.aggs : {}), - totals: { terms: { field: aggregator, size } }, - }; - query.size = size; - if (timestamp && eventID) { - query.search_after = [timestamp, eventID] as Array<number | string>; + private static urlDecodeCursor(cursor: string): PaginationCursor { + const fixedCursor = cursor.replace(/\-/g, '+').replace(/_/g, '/'); + const data = Buffer.from(fixedCursor, 'base64').toString('utf8'); + const { timestamp, eventID } = JSON.parse(data); + // take some extra care to only grab the things we want + // convert the timestamp string to date object + return { timestamp, eventID }; } - return query; -} -export function buildPaginationCursor(total: number, results: ResolverEvent[]): string | null { - if (total > results.length && results.length > 0) { - const lastResult = results[results.length - 1]; - const cursor = { - timestamp: lastResult['@timestamp'], - eventID: entityId(lastResult), - }; - return urlEncodeCursor(cursor); + /** + * Constructs a cursor to use in subsequent queries to retrieve the next set of results. + * + * @param total the total events that exist in ES scoped for a particular query. + * @param results the events that were returned by the ES query + */ + static buildCursor(total: number, results: ResolverEvent[]): string | null { + if (total > results.length && results.length > 0) { + const lastResult = results[results.length - 1]; + const cursor = { + timestamp: lastResult['@timestamp'], + eventID: eventId(lastResult), + }; + return PaginationBuilder.urlEncodeCursor(cursor); + } + return null; + } + + /** + * Creates a PaginationBuilder with an upper bound limit of results and a specific cursor to use to retrieve the next + * set of results. 
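+ * (an invalid or corrupted cursor is deliberately swallowed in the catch below, falling back to a builder without search_after, i.e. the first page)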
+ * + * @param limit upper bound for the number of results to return within this query + * @param after a cursor to retrieve the next set of results + */ + static createBuilder(limit: number, after?: string): PaginationBuilder { + if (after) { + try { + const cursor = PaginationBuilder.urlDecodeCursor(after); + if (cursor.timestamp && cursor.eventID) { + return new PaginationBuilder(limit, cursor.timestamp, cursor.eventID); + } + } catch (err) { + /* tslint:disable:no-empty */ + } // ignore invalid cursor values + } + return new PaginationBuilder(limit); } - return null; -} -export function paginatedResults(response: SearchResponse<ResolverEvent>): PaginatedResults { - if (response.hits.hits.length === 0) { - return { totals: {}, results: [] }; + /** + * Creates an object for adding the pagination fields to a query + * + * @param numTerms number of unique IDs that are being searched for in this query + * @param tiebreaker a unique field to use as the tiebreaker for the search_after + * @param aggregator the field that specifies a unique ID per event (e.g. entity_id) + * @param aggs other aggregations being used with this query + * @returns an object containing the pagination information + */ + buildQueryFields( + numTerms: number, + tiebreaker: string, + aggregator: string, + aggs: JsonObject = {} + ): JsonObject { + const fields: JsonObject = {}; + fields.sort = [{ '@timestamp': 'asc' }, { [tiebreaker]: 'asc' }]; + fields.aggs = { ...aggs, totals: { terms: { field: aggregator, size: numTerms } } }; + fields.size = this.size; + if (this.timestamp && this.eventID) { + fields.search_after = [this.timestamp, this.eventID] as Array<number | string>; + } + return fields; } - const totals = response.aggregations?.totals?.buckets?.reduce( - // eslint-disable-next-line @typescript-eslint/no-explicit-any - (cummulative: any, bucket: any) => ({ ...cummulative, [bucket.key]: bucket.doc_count }), - {} - ); + /** + * Returns the totals found for the specified query + * + * @param aggregations the aggregation field from the ES response + * @returns a mapping of unique ID (e.g. entity_ids) to totals found for those IDs + */ + static getTotals(aggregations?: TotalsAggregation): Record<string, number> { + if (!aggregations?.totals?.buckets) { + return {}; + } - const results = response.hits.hits.map((hit) => hit._source); - return { totals, results }; + return aggregations?.totals?.buckets?.reduce( + (cumulative: Record<string, number>, bucket: AggBucket) => ({ + ...cumulative, + [bucket.key]: bucket.doc_count, + }), + {} + ); + } } diff --git a/x-pack/plugins/siem/server/endpoint/routes/resolver/utils/tree.test.ts b/x-pack/plugins/siem/server/endpoint/routes/resolver/utils/tree.test.ts new file mode 100644 index 0000000000000..eb80c840783ef --- /dev/null +++ b/x-pack/plugins/siem/server/endpoint/routes/resolver/utils/tree.test.ts @@ -0,0 +1,64 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +import { EndpointDocGenerator } from '../../../../../common/endpoint/generate_data'; +import { Tree } from './tree'; +import { + ResolverAncestry, + ResolverEvent, + ResolverRelatedEvents, +} from '../../../../../common/endpoint/types'; +import { entityId } from '../../../../../common/endpoint/models/event'; + +describe('Tree', () => { + const generator = new EndpointDocGenerator(); + + describe('ancestry', () => { + // transform the generator's array of events into the format expected by the tree class + const ancestorInfo: ResolverAncestry = { + ancestors: generator + .createAlertEventAncestry(5, 0, 0) + .filter((event) => { + return event.event.kind === 'event'; + }) + .map((event) => { + return { + entityID: event.process.entity_id, + // The generator returns Events, but the tree needs a ResolverEvent + lifecycle: [event as ResolverEvent], + }; + }), + nextAncestor: 'hello', + }; + + it('adds ancestors to the tree', () => { + const tree = new Tree(ancestorInfo.ancestors[0].entityID, { ancestry: ancestorInfo }); + const ids = tree.ids(); + ids.forEach((id) => { + const foundAncestor = ancestorInfo.ancestors.find( + (ancestor) => entityId(ancestor.lifecycle[0]) === id + ); + expect(foundAncestor).not.toBeUndefined(); + }); + expect(tree.render().ancestry.nextAncestor).toEqual('hello'); + }); + }); + + describe('related events', () => { + it('adds related events to the tree', () => { + const root = generator.generateEvent(); + const events: ResolverRelatedEvents = { + entityID: root.process.entity_id, + events: Array.from(generator.relatedEventsGenerator(root)), + nextEvent: null, + }; + const tree = new Tree(root.process.entity_id, { relatedEvents: events }); + const rendered = tree.render(); + expect(rendered.relatedEvents.nextEvent).toBeNull(); + expect(rendered.relatedEvents.events).toStrictEqual(events.events); + }); + }); +}); diff --git a/x-pack/plugins/siem/server/endpoint/routes/resolver/utils/tree.ts b/x-pack/plugins/siem/server/endpoint/routes/resolver/utils/tree.ts index 1fb70cf91adf8..048964068324b 100644 --- a/x-pack/plugins/siem/server/endpoint/routes/resolver/utils/tree.ts +++ b/x-pack/plugins/siem/server/endpoint/routes/resolver/utils/tree.ts @@ -7,18 +7,26 @@ import _ from 'lodash'; import { ResolverEvent, - ResolverNode, ResolverNodeStats, - ResolverNodePagination, + ResolverRelatedEvents, + ResolverAncestry, + ResolverTree, + ResolverChildren, } from '../../../../../common/endpoint/types'; -import { entityId, parentEntityId } from '../../../../../common/endpoint/models/event'; -import { buildPaginationCursor } from './pagination'; +import { createTree } from './node'; -type ExtractFunction = (event: ResolverEvent) => string | undefined; +interface Node { + entityID: string; + lifecycle: ResolverEvent[]; + stats?: ResolverNodeStats; +} -function createNode(id: string): ResolverNode { - return { id, children: [], pagination: {}, events: [], lifecycle: [] }; +export interface Options { + relatedEvents?: ResolverRelatedEvents; + ancestry?: ResolverAncestry; + children?: ResolverChildren; } + /** * This class aids in constructing a tree of process events. It works in the following way: * @@ -50,181 +58,101 @@ function createNode(id: string): ResolverNode { * what this means is that noisy neighbors for a given level may hide other child process events that occur later * temporally in the same level--so, while a heavily forking process might get shown, maybe the actually malicious * event doesn't show up in the tree at the beginning. 
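 * (For example, with a child limit of 3, a process that forks 100 times surfaces only its 3 earliest children at that level; later children are reachable only through the pagination cursor.)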
+ * + * This Tree's root/origin could be in the middle of the tree. The origin corresponds to the id passed in when this + * Tree object is constructed. The tree can have ancestors and children coming from the origin. */ - export class Tree { - protected cache: Map<string, ResolverNode>; - protected root: ResolverNode; - protected id: string; - - constructor(id: string) { - const root = createNode(id); - this.id = id; - this.cache = new Map(); - this.root = root; - this.cache.set(id, root); + protected cache: Map<string, Node> = new Map(); + protected tree: ResolverTree; + + constructor(protected readonly id: string, options: Options = {}) { + const tree = createTree(this.id); + this.tree = tree; + this.cache.set(id, tree); + + this.addRelatedEvents(options.relatedEvents); + this.addAncestors(options.ancestry); + this.addChildren(options.children); } - public render(): ResolverNode { - return this.root; + /** + * Return the origin node. The origin node is the node with the id that the tree was built using. + * + * @returns the origin ResolverTree + */ + public render(): ResolverTree { + return this.tree; } + /** + * Returns an array of all the unique IDs for the nodes stored in this tree. + * + * @returns an array of strings representing the unique IDs for the nodes in the tree + */ public ids(): string[] { return [...this.cache.keys()]; } - public static async merge( - childrenPromise: Promise<Tree>, - ancestorsPromise: Promise<Tree>, - eventsPromise: Promise<Tree> - ): Promise<Tree> { - const [children, ancestors, events] = await Promise.all([ - childrenPromise, - ancestorsPromise, - eventsPromise, - ]); - - /* - * we only allow for merging when we have partial trees that - * represent the same root node - */ - const rootID = children.id; - if (rootID !== ancestors.id || rootID !== events.id) { - throw new Error('cannot merge trees with different roots'); + /** + * Add related events for the tree's origin node. Related events cannot be added for other nodes. + * + * @param relatedEventsInfo is the related events and pagination information to add to the tree. + */ + private addRelatedEvents(relatedEventsInfo: ResolverRelatedEvents | undefined) { + if (!relatedEventsInfo) { + return; } - Object.entries(ancestors.cache).forEach(([id, node]) => { - if (rootID !== id) { - children.cache.set(id, node); - } - }); - - children.root.lifecycle = ancestors.root.lifecycle; - children.root.ancestors = ancestors.root.ancestors; - children.root.events = events.root.events; - - Object.assign(children.root.pagination, ancestors.root.pagination, events.root.pagination); - - return children; + this.tree.relatedEvents.events = relatedEventsInfo.events; + this.tree.relatedEvents.nextEvent = relatedEventsInfo.nextEvent; } - public addEvent(...events: ResolverEvent[]): void { - events.forEach((event) => { - const id = entityId(event); + /** + * Add ancestors to the tree. + * + * @param ancestorInfo is the ancestors and pagination information to add to the tree. 
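+ * (Note: the ancestors array also carries the origin's own lifecycle events; the code below peels those off into `tree.lifecycle` rather than treating the origin as its own ancestor.)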
+ */ + private addAncestors(ancestorInfo: ResolverAncestry | undefined) { + if (!ancestorInfo) { + return; + } - this.ensureCache(id); - const currentNode = this.cache.get(id); - if (currentNode !== undefined) { - currentNode.events.push(event); - } - }); - } + this.tree.ancestry.nextAncestor = ancestorInfo.nextAncestor; - public addAncestor(id: string, ...events: ResolverEvent[]): void { - events.forEach((event) => { - const ancestorID = entityId(event); - if (this.cache.get(ancestorID) === undefined) { - const newParent = createNode(ancestorID); - this.cache.set(ancestorID, newParent); - if (!this.root.ancestors) { - this.root.ancestors = []; - } - this.root.ancestors.push(newParent); - } - const currentAncestor = this.cache.get(ancestorID); - if (currentAncestor !== undefined) { - currentAncestor.lifecycle.push(event); + // the ancestry info holds the lifecycle events for the root of the tree too, so we need to pull that out + ancestorInfo.ancestors.forEach((node) => { + if (node.entityID === this.id) { + this.tree.lifecycle = node.lifecycle; + return; } + this.cache.set(node.entityID, node); + this.tree.ancestry.ancestors.push(node); }); } - public addStats(id: string, stats: ResolverNodeStats): void { - this.ensureCache(id); + /** + * Add statistics to a node. + * + * @param id unique node ID to add the stats information to + * @param stats information indicating how many related events and alerts exist for the specific node. + */ + public addStats(id: string, stats: ResolverNodeStats) { const currentNode = this.cache.get(id); if (currentNode !== undefined) { currentNode.stats = stats; } } - public setNextAncestor(next: string | null): void { - this.root.pagination.nextAncestor = next; - } - - public setNextEvent(next: string | null): void { - this.root.pagination.nextEvent = next; - } - - public setNextAlert(next: string | null): void { - this.root.pagination.nextAlert = next; - } - - public addChild(...events: ResolverEvent[]): void { - events.forEach((event) => { - const id = entityId(event); - const parentID = parentEntityId(event); - - this.ensureCache(parentID); - let currentNode = this.cache.get(id); - - if (currentNode === undefined) { - currentNode = createNode(id); - this.cache.set(id, currentNode); - if (parentID !== undefined) { - const parentNode = this.cache.get(parentID); - if (parentNode !== undefined) { - parentNode.children.push(currentNode); - } - } - } - currentNode.lifecycle.push(event); - }); - } - - public markLeafNode(...ids: string[]): void { - ids.forEach((id) => { - this.ensureCache(id); - const currentNode = this.cache.get(id); - if (currentNode !== undefined && !currentNode.pagination.nextChild) { - currentNode.pagination.nextChild = null; - } - }); - } - - public paginateEvents(totals: Record<string, number>, events: ResolverEvent[]): void { - return this.paginate(entityId, 'nextEvent', totals, events); - } + private addChildren(children: ResolverChildren | undefined) { + if (!children) { + return; + } - public paginateChildren(totals: Record<string, number>, children: ResolverEvent[]): void { - return this.paginate(parentEntityId, 'nextChild', totals, children); - } + this.tree.children = children; - private paginate( - grouper: ExtractFunction, - attribute: keyof ResolverNodePagination, - totals: Record<string, number>, - records: ResolverEvent[] - ): void { - const grouped = _.groupBy(records, grouper); - Object.entries(totals).forEach(([id, total]) => { - if (this.cache.get(id) !== undefined) { - if (grouped[id]) { - /* - * if we have any results, attempt to build a pagination cursor, the function 
- * below hands back a null value if no cursor is necessary because we have - * all of the records. - */ - const currentNode = this.cache.get(id); - if (currentNode !== undefined) { - currentNode.pagination[attribute] = buildPaginationCursor(total, grouped[id]); - } - } - } + children.childNodes.forEach((child) => { + this.cache.set(child.entityID, child); }); } - - private ensureCache(id: string | undefined): void { - if (id === undefined || this.cache.get(id) === undefined) { - throw new Error('dangling node'); - } - } } diff --git a/x-pack/test/api_integration/apis/endpoint/resolver.ts b/x-pack/test/api_integration/apis/endpoint/resolver.ts index 73fe435764b74..e9ef8f581299b 100644 --- a/x-pack/test/api_integration/apis/endpoint/resolver.ts +++ b/x-pack/test/api_integration/apis/endpoint/resolver.ts @@ -3,241 +3,571 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ - +import _ from 'lodash'; import expect from '@kbn/expect'; +import { + ChildNode, + LifecycleNode, + ResolverAncestry, + ResolverEvent, + ResolverRelatedEvents, + ResolverChildren, + ResolverTree, + LegacyEndpointEvent, +} from '../../../../plugins/siem/common/endpoint/types'; +import { parentEntityId } from '../../../../plugins/siem/common/endpoint/models/event'; import { FtrProviderContext } from '../../ftr_provider_context'; -const commonHeaders = { - accept: 'application/json', - 'kbn-xsrf': 'some-xsrf-token', +import { Event, Tree, TreeNode } from '../../../../plugins/siem/common/endpoint/generate_data'; +import { Options, GeneratedTrees } from '../../services/resolver'; + +/** + * Check that the given lifecycle is in the resolver tree's corresponding map + * + * @param node a lifecycle node containing the start and end events for a node + * @param nodeMap a map of entity_ids to nodes in which to look for the passed-in `node` + */ +const expectLifecycleNodeInMap = (node: LifecycleNode, nodeMap: Map<string, TreeNode>) => { + const genNode = nodeMap.get(node.entityID); + expect(genNode).to.be.ok(); + compareArrays(genNode!.lifecycle, node.lifecycle, true); +}; + +/** + * Verify that all the ancestor nodes including the origin are valid. + * + * @param origin the origin node for the tree + * @param ancestors an array of ancestors + * @param tree the generated resolver tree as the source of truth + * @param verifyLastParent a boolean indicating whether to check the last ancestor. If the ancestors array intentionally + * does not contain all the ancestors, the last one will not have its parent present + */ +const verifyAncestryFromOrigin = ( + origin: LifecycleNode, + ancestors: LifecycleNode[], + tree: Tree, + verifyLastParent: boolean +) => { + compareArrays(tree.origin.lifecycle, origin.lifecycle, true); + verifyAncestry(ancestors, tree, verifyLastParent); +}; + +/** + * Verify that all the ancestor nodes are valid and optionally have parents. + * + * @param ancestors an array of ancestors + * @param tree the generated resolver tree as the source of truth + * @param verifyLastParent a boolean indicating whether to check the last ancestor. 
If the ancestors array intentionally + * does not contain all the ancestors, the last one will not have its parent present + */ +const verifyAncestry = (ancestors: LifecycleNode[], tree: Tree, verifyLastParent: boolean) => { + // group the ancestors by their entity_id mapped to a lifecycle node + const groupedAncestors = _.groupBy(ancestors, (ancestor) => ancestor.entityID); + // group by parent entity_id + const groupedAncestorsParent = _.groupBy(ancestors, (ancestor) => + parentEntityId(ancestor.lifecycle[0]) + ); + // make sure there aren't any nodes with the same entity_id + expect(Object.keys(groupedAncestors).length).to.eql(ancestors.length); + // make sure there aren't any nodes with the same parent entity_id + expect(Object.keys(groupedAncestorsParent).length).to.eql(ancestors.length); + ancestors.forEach((node) => { + const parentID = parentEntityId(node.lifecycle[0]); + // the last node generated will have `undefined` as the parent entity_id + if (parentID !== undefined && verifyLastParent) { + expect(groupedAncestors[parentID]).to.be.ok(); + } + expectLifecycleNodeInMap(node, tree.ancestry); + }); +}; + +/** + * Verify that the children nodes are correct + * + * @param children the children nodes + * @param tree the generated resolver tree as the source of truth + * @param numberOfParents an optional number used to check that there are a certain number of parents in the children array + * @param childrenPerParent an optional number used to check that there are a certain number of children for each parent + */ +const verifyChildren = ( + children: ChildNode[], + tree: Tree, + numberOfParents?: number, + childrenPerParent?: number +) => { + // group the children by their entity_id mapped to a child node + const groupedChildren = _.groupBy(children, (child) => child.entityID); + // make sure each child is unique + expect(Object.keys(groupedChildren).length).to.eql(children.length); + if (numberOfParents !== undefined) { + const groupParent = _.groupBy(children, (child) => parentEntityId(child.lifecycle[0])); + expect(Object.keys(groupParent).length).to.eql(numberOfParents); + if (childrenPerParent !== undefined) { + Object.values(groupParent).forEach((childNodes) => + expect(childNodes.length).to.be(childrenPerParent) + ); + } + } + + children.forEach((child) => { + expectLifecycleNodeInMap(child, tree.children); + }); +}; + +/** + * Compare an array of events returned from an API with an array of events generated + * + * @param expected an array to use as the source of truth + * @param toTest the array to test against the source of truth + * @param lengthCheck an optional flag to check that the arrays are the same length + */ +const compareArrays = ( + expected: Event[], + toTest: ResolverEvent[], + lengthCheck: boolean = false +) => { + if (lengthCheck) { + expect(expected.length).to.eql(toTest.length); + } + toTest.forEach((toTestEvent) => { + expect( + expected.find((arrEvent) => { + return JSON.stringify(arrEvent) === JSON.stringify(toTestEvent); + }) + ).to.be.ok(); + }); }; -// eslint-disable-next-line import/no-default-export export default function resolverAPIIntegrationTests({ getService }: FtrProviderContext) { const supertest = getService('supertest'); const esArchiver = getService('esArchiver'); + const resolver = getService('resolverGenerator'); + + let resolverTrees: GeneratedTrees; + let tree: Tree; + const treeOptions: Options = { + ancestors: 5, + relatedEvents: 4, + children: 3, + generations: 2, + percentTerminated: 100, + percentWithRelated: 100, + numTrees: 1, +
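// presumably forces each parent to spawn exactly `children` children, keeping the exact-count assertions below deterministic +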
alwaysGenMaxChildrenPerNode: true, + }; describe('Resolver', () => { - before(async () => await esArchiver.load('endpoint/resolver/api_feature')); - after(async () => await esArchiver.unload('endpoint/resolver/api_feature')); - - describe('related events endpoint', () => { - const endpointID = '5a0c957f-b8e7-4538-965e-57e8bb86ad3a'; - const entityID = '94042'; - const cursor = 'eyJ0aW1lc3RhbXAiOjE1ODE0NTYyNTUwMDAsImV2ZW50SUQiOiI5NDA0MyJ9'; - - it('should return details for the root node', async () => { - const { body } = await supertest - .get(`/api/endpoint/resolver/${entityID}/events?legacyEndpointID=${endpointID}`) - .set(commonHeaders) - .expect(200); - expect(body.events.length).to.eql(1); - expect(body.pagination.nextEvent).to.eql(null); - }); + before(async () => { + await esArchiver.load('endpoint/resolver/api_feature'); + resolverTrees = await resolver.createTrees(treeOptions); + // we only requested a single alert so there's only 1 tree + tree = resolverTrees.trees[0]; + }); + after(async () => { + await resolver.deleteTrees(resolverTrees); + await esArchiver.unload('endpoint/resolver/api_feature'); + }); - it('returns no values when there is no more data', async () => { - const { body } = await supertest - // after is set to the document id of the last event so there shouldn't be any more after it - .get( - `/api/endpoint/resolver/${entityID}/events?legacyEndpointID=${endpointID}&afterEvent=${cursor}` - ) - .set(commonHeaders) - .expect(200); - expect(body.events).be.empty(); - expect(body.pagination.nextEvent).to.eql(null); - }); + describe('related events route', () => { + describe('legacy events', () => { + const endpointID = '5a0c957f-b8e7-4538-965e-57e8bb86ad3a'; + const entityID = '94042'; + const cursor = 'eyJ0aW1lc3RhbXAiOjE1ODE0NTYyNTUwMDAsImV2ZW50SUQiOiI5NDA0MyJ9'; - it('should return the first page of information when the cursor is invalid', async () => { - const { body } = await supertest - .get( - `/api/endpoint/resolver/${entityID}/events?legacyEndpointID=${endpointID}&afterEvent=blah` - ) - .set(commonHeaders) - .expect(200); - expect(body.pagination.nextEvent).to.eql(null); - }); + it('should return details for the root node', async () => { + const { body }: { body: ResolverRelatedEvents } = await supertest + .get(`/api/endpoint/resolver/${entityID}/events?legacyEndpointID=${endpointID}`) + .expect(200); + expect(body.events.length).to.eql(1); + expect(body.entityID).to.eql(entityID); + expect(body.nextEvent).to.eql(null); + }); - it('should error on invalid pagination values', async () => { - await supertest - .get(`/api/endpoint/resolver/${entityID}/events?events=0`) - .set(commonHeaders) - .expect(400); - await supertest - .get(`/api/endpoint/resolver/${entityID}/events?events=2000`) - .set(commonHeaders) - .expect(400); - await supertest - .get(`/api/endpoint/resolver/${entityID}/events?events=-1`) - .set(commonHeaders) - .expect(400); - }); + it('returns no values when there is no more data', async () => { + const { body }: { body: ResolverRelatedEvents } = await supertest + // after is set to the document id of the last event so there shouldn't be any more after it + .get( + `/api/endpoint/resolver/${entityID}/events?legacyEndpointID=${endpointID}&afterEvent=${cursor}` + ) + .expect(200); + expect(body.events).be.empty(); + expect(body.entityID).to.eql(entityID); + expect(body.nextEvent).to.eql(null); + }); + + it('should return the first page of information when the cursor is invalid', async () => { + const { body }: { body: ResolverRelatedEvents } = await 
supertest + .get( + `/api/endpoint/resolver/${entityID}/events?legacyEndpointID=${endpointID}&afterEvent=blah` + ) + .expect(200); + expect(body.entityID).to.eql(entityID); + expect(body.nextEvent).to.eql(null); + }); + + it('should return no results for an invalid endpoint ID', async () => { + const { body }: { body: ResolverRelatedEvents } = await supertest + .get(`/api/endpoint/resolver/${entityID}/events?legacyEndpointID=foo`) + .expect(200); + expect(body.nextEvent).to.eql(null); + expect(body.entityID).to.eql(entityID); + expect(body.events).to.be.empty(); + }); - it('should not find any events', async () => { - const { body } = await supertest - .get(`/api/endpoint/resolver/5555/events`) - .set(commonHeaders) - .expect(200); - expect(body.pagination.nextEvent).to.eql(null); - expect(body.events).to.be.empty(); + it('should error on invalid pagination values', async () => { + await supertest.get(`/api/endpoint/resolver/${entityID}/events?events=0`).expect(400); + await supertest.get(`/api/endpoint/resolver/${entityID}/events?events=2000`).expect(400); + await supertest.get(`/api/endpoint/resolver/${entityID}/events?events=-1`).expect(400); + }); }); - it('should return no results for an invalid endpoint ID', async () => { - const { body } = await supertest - .get(`/api/endpoint/resolver/${entityID}/events?legacyEndpointID=foo`) - .set(commonHeaders) - .expect(200); - expect(body.pagination.nextEvent).to.eql(null); - expect(body.events).to.be.empty(); + describe('endpoint events', () => { + it('should not find any events', async () => { + const { body }: { body: ResolverRelatedEvents } = await supertest + .get(`/api/endpoint/resolver/5555/events`) + .expect(200); + expect(body.nextEvent).to.eql(null); + expect(body.events).to.be.empty(); + }); + + it('should return details for the root node', async () => { + const { body }: { body: ResolverRelatedEvents } = await supertest + .get(`/api/endpoint/resolver/${tree.origin.id}/events`) + .expect(200); + expect(body.events.length).to.eql(4); + compareArrays(tree.origin.relatedEvents, body.events, true); + expect(body.nextEvent).to.eql(null); + }); + + it('should return paginated results for the root node', async () => { + let { body }: { body: ResolverRelatedEvents } = await supertest + .get(`/api/endpoint/resolver/${tree.origin.id}/events?events=2`) + .expect(200); + expect(body.events.length).to.eql(2); + compareArrays(tree.origin.relatedEvents, body.events); + expect(body.nextEvent).not.to.eql(null); + + ({ body } = await supertest + .get( + `/api/endpoint/resolver/${tree.origin.id}/events?events=2&afterEvent=${body.nextEvent}` + ) + .expect(200)); + expect(body.events.length).to.eql(2); + compareArrays(tree.origin.relatedEvents, body.events); + expect(body.nextEvent).to.not.eql(null); + + ({ body } = await supertest + .get( + `/api/endpoint/resolver/${tree.origin.id}/events?events=2&afterEvent=${body.nextEvent}` + ) + .expect(200)); + expect(body.events).to.be.empty(); + expect(body.nextEvent).to.eql(null); + }); + + it('should return the first page of information when the cursor is invalid', async () => { + const { body }: { body: ResolverRelatedEvents } = await supertest + .get(`/api/endpoint/resolver/${tree.origin.id}/events?afterEvent=blah`) + .expect(200); + expect(body.events.length).to.eql(4); + compareArrays(tree.origin.relatedEvents, body.events, true); + expect(body.nextEvent).to.eql(null); + }); }); }); - describe('lifecycle events endpoint', () => { - const endpointID = '5a0c957f-b8e7-4538-965e-57e8bb86ad3a'; - const entityID 
= '94042'; - - it('should return details for the root node', async () => { - const { body } = await supertest - .get( - `/api/endpoint/resolver/${entityID}/ancestry?legacyEndpointID=${endpointID}&ancestors=5` - ) - .set(commonHeaders) - .expect(200); - expect(body.lifecycle.length).to.eql(2); - expect(body.parent).not.to.eql(null); - expect(body.pagination.nextAncestor).to.eql(null); - }); + describe('ancestry events route', () => { + describe('legacy events', () => { + const endpointID = '5a0c957f-b8e7-4538-965e-57e8bb86ad3a'; + const entityID = '94042'; - it('should have a populated next parameter', async () => { - const { body } = await supertest - .get(`/api/endpoint/resolver/${entityID}/ancestry?legacyEndpointID=${endpointID}`) - .set(commonHeaders) - .expect(200); - expect(body.pagination.nextAncestor).to.eql('94041'); - }); + it('should return details for the root node', async () => { + const { body }: { body: ResolverAncestry } = await supertest + .get( + `/api/endpoint/resolver/${entityID}/ancestry?legacyEndpointID=${endpointID}&ancestors=5` + ) + .expect(200); + expect(body.ancestors[0].lifecycle.length).to.eql(2); + expect(body.nextAncestor).to.eql(null); + }); + + it('should have a populated next parameter', async () => { + const { body }: { body: ResolverAncestry } = await supertest + .get(`/api/endpoint/resolver/${entityID}/ancestry?legacyEndpointID=${endpointID}`) + .expect(200); + expect(body.nextAncestor).to.eql('94041'); + }); - it('should handle an ancestors param request', async () => { - let { body } = await supertest - .get(`/api/endpoint/resolver/${entityID}/ancestry?legacyEndpointID=${endpointID}`) - .set(commonHeaders) - .expect(200); - const next = body.pagination.nextAncestor; - - ({ body } = await supertest - .get(`/api/endpoint/resolver/${next}/ancestry?legacyEndpointID=${endpointID}&ancestors=1`) - .set(commonHeaders) - .expect(200)); - expect(body.lifecycle.length).to.eql(1); - expect(body.pagination.nextAncestor).to.eql(null); + it('should handle an ancestors param request', async () => { + let { body }: { body: ResolverAncestry } = await supertest + .get(`/api/endpoint/resolver/${entityID}/ancestry?legacyEndpointID=${endpointID}`) + .expect(200); + const next = body.nextAncestor; + + ({ body } = await supertest + .get( + `/api/endpoint/resolver/${next}/ancestry?legacyEndpointID=${endpointID}&ancestors=1` + ) + .expect(200)); + expect(body.ancestors[0].lifecycle.length).to.eql(1); + expect(body.nextAncestor).to.eql(null); + }); }); - it('should handle an invalid id', async () => { - const { body } = await supertest - .get(`/api/endpoint/resolver/alskdjflasj/ancestry`) - .set(commonHeaders) - .expect(200); - expect(body.lifecycle.length).to.eql(0); - expect(body.pagination.nextAncestor).to.eql(null); + describe('endpoint events', () => { + const getRootAndAncestry = (ancestry: ResolverAncestry) => { + return { root: ancestry.ancestors[0], ancestry: ancestry.ancestors.slice(1) }; + }; + + it('should return details for the root node', async () => { + const { body }: { body: ResolverAncestry } = await supertest + .get(`/api/endpoint/resolver/${tree.origin.id}/ancestry?ancestors=9`) + .expect(200); + // the tree we generated had 5 ancestors + 1 origin node + expect(body.ancestors.length).to.eql(6); + const ancestryInfo = getRootAndAncestry(body); + verifyAncestryFromOrigin(ancestryInfo.root, ancestryInfo.ancestry, tree, true); + expect(body.nextAncestor).to.eql(null); + }); + + it('should handle an invalid id', async () => { + const { body }: { body: 
ResolverAncestry } = await supertest + .get(`/api/endpoint/resolver/alskdjflasj/ancestry`) + .expect(200); + expect(body.ancestors).to.be.empty(); + expect(body.nextAncestor).to.eql(null); + }); + + it('should have a populated next parameter', async () => { + const { body }: { body: ResolverAncestry } = await supertest + .get(`/api/endpoint/resolver/${tree.origin.id}/ancestry?ancestors=2`) + .expect(200); + // it should have 2 ancestors + 1 origin + expect(body.ancestors.length).to.eql(3); + const ancestryInfo = getRootAndAncestry(body); + verifyAncestryFromOrigin(ancestryInfo.root, ancestryInfo.ancestry, tree, false); + expect(body.nextAncestor).to.eql( + // it should be the parent entity id on the last element of the ancestry array + parentEntityId(ancestryInfo.ancestry[ancestryInfo.ancestry.length - 1].lifecycle[0]) + ); + }); + + it('should handle multiple ancestor requests', async () => { + let { body }: { body: ResolverAncestry } = await supertest + .get(`/api/endpoint/resolver/${tree.origin.id}/ancestry?ancestors=3`) + .expect(200); + expect(body.ancestors.length).to.eql(4); + const next = body.nextAncestor; + + ({ body } = await supertest + .get(`/api/endpoint/resolver/${next}/ancestry?ancestors=1`) + .expect(200)); + expect(body.ancestors.length).to.eql(2); + verifyAncestry(body.ancestors, tree, true); + // the highest node in the generated tree will not have a parent ID which causes the server to return + // without setting the pagination so nextAncestor will be null + expect(body.nextAncestor).to.eql(null); + }); }); }); - describe('children endpoint', () => { - const endpointID = '5a0c957f-b8e7-4538-965e-57e8bb86ad3a'; - const entityID = '94041'; - const cursor = 'eyJ0aW1lc3RhbXAiOjE1ODE0NTYyNTUwMDAsImV2ZW50SUQiOiI5NDA0MiJ9'; - - it('returns child process lifecycle events', async () => { - const { body } = await supertest - .get(`/api/endpoint/resolver/${entityID}/children?legacyEndpointID=${endpointID}`) - .set(commonHeaders) - .expect(200); - expect(body.children.length).to.eql(1); - expect(body.children[0].lifecycle.length).to.eql(2); - expect(body.children[0].lifecycle[0].endgame.unique_pid).to.eql(94042); - }); + describe('children route', () => { + describe('legacy events', () => { + const endpointID = '5a0c957f-b8e7-4538-965e-57e8bb86ad3a'; + const entityID = '94041'; + const cursor = 'eyJ0aW1lc3RhbXAiOjE1ODE0NTYyNTUwMDAsImV2ZW50SUQiOiI5NDA0MiJ9'; - it('returns multiple levels of child process lifecycle events', async () => { - const { body } = await supertest - .get(`/api/endpoint/resolver/93802/children?legacyEndpointID=${endpointID}&generations=3`) - .set(commonHeaders) - .expect(200); - expect(body.pagination.nextChild).to.be(null); - expect(body.children[0].pagination.nextChild).to.be(null); - - expect(body.children.length).to.eql(8); - expect(body.children[0].children[0].lifecycle.length).to.eql(2); - expect(body.children[0].lifecycle[0].endgame.unique_pid).to.eql(93932); - }); + it('returns child process lifecycle events', async () => { + const { body }: { body: ResolverChildren } = await supertest + .get(`/api/endpoint/resolver/${entityID}/children?legacyEndpointID=${endpointID}`) + .expect(200); + expect(body.childNodes.length).to.eql(1); + expect(body.childNodes[0].lifecycle.length).to.eql(2); + expect( + // for some reason the ts server doesn't think `endgame` exists even though we're using ResolverEvent + // here, so to avoid it complaining we'll just force it + (body.childNodes[0].lifecycle[0] as LegacyEndpointEvent).endgame.unique_pid + ).to.eql(94042); + 
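// for reference, the cursor constant above is the URL-safe base64 of {"timestamp":1581456255000,"eventID":"94042"}, i.e. it points just past this child +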
}); - it('returns no values when there is no more data', async () => { - const { body } = await supertest - // after is set to the document id of the last event so there shouldn't be any more after it - .get( - `/api/endpoint/resolver/${entityID}/children?legacyEndpointID=${endpointID}&afterChild=${cursor}` - ) - .set(commonHeaders) - .expect(200); - expect(body.children).be.empty(); - expect(body.pagination.nextChild).to.eql(null); - }); + it('returns multiple levels of child process lifecycle events', async () => { + const { body }: { body: ResolverChildren } = await supertest + .get( + `/api/endpoint/resolver/93802/children?legacyEndpointID=${endpointID}&generations=1` + ) + .expect(200); + expect(body.nextChild).to.be(null); + expect(body.childNodes[0].nextChild).to.be(null); + expect(body.childNodes.length).to.eql(8); + expect(body.childNodes[0].lifecycle.length).to.eql(1); + expect( + // for some reason the ts server doesn't think `endgame` exists even though we're using ResolverEvent + // here, so to avoid it complaining we'll just force it + (body.childNodes[0].lifecycle[0] as LegacyEndpointEvent).endgame.unique_pid + ).to.eql(93932); + }); - it('returns the first page of information when the cursor is invalid', async () => { - const { body } = await supertest - .get( - `/api/endpoint/resolver/${entityID}/children?legacyEndpointID=${endpointID}&afterChild=blah` - ) - .set(commonHeaders) - .expect(200); - expect(body.children.length).to.eql(1); - expect(body.pagination.nextChild).to.be(null); - }); + it('returns no values when there is no more data', async () => { + const { body } = await supertest + // after is set to the document id of the last event so there shouldn't be any more after it + .get( + `/api/endpoint/resolver/${entityID}/children?legacyEndpointID=${endpointID}&afterChild=${cursor}` + ) + .expect(200); + expect(body.childNodes).be.empty(); + expect(body.nextChild).to.eql(null); + }); - it('errors on invalid pagination values', async () => { - await supertest - .get(`/api/endpoint/resolver/${entityID}/children?children=0`) - .set(commonHeaders) - .expect(400); - await supertest - .get(`/api/endpoint/resolver/${entityID}/children?children=2000`) - .set(commonHeaders) - .expect(400); - await supertest - .get(`/api/endpoint/resolver/${entityID}/children?children=-1`) - .set(commonHeaders) - .expect(400); - }); + it('returns the first page of information when the cursor is invalid', async () => { + const { body }: { body: ResolverChildren } = await supertest + .get( + `/api/endpoint/resolver/${entityID}/children?legacyEndpointID=${endpointID}&afterChild=blah` + ) + .expect(200); + expect(body.childNodes.length).to.eql(1); + expect(body.nextChild).to.be(null); + }); + + it('errors on invalid pagination values', async () => { + await supertest.get(`/api/endpoint/resolver/${entityID}/children?children=0`).expect(400); + await supertest + .get(`/api/endpoint/resolver/${entityID}/children?children=2000`) + .expect(400); + await supertest + .get(`/api/endpoint/resolver/${entityID}/children?children=-1`) + .expect(400); + }); - it('returns empty events without a matching entity id', async () => { - const { body } = await supertest - .get(`/api/endpoint/resolver/5555/children`) - .set(commonHeaders) - .expect(200); - expect(body.pagination.nextChild).to.eql(null); - expect(body.children).to.be.empty(); + it('returns empty events without a matching entity id', async () => { + const { body }: { body: ResolverChildren } = await supertest + .get(`/api/endpoint/resolver/5555/children`) 
+ .expect(200); + expect(body.nextChild).to.eql(null); + expect(body.childNodes).to.be.empty(); + }); + + it('returns empty events with an invalid endpoint id', async () => { + const { body }: { body: ResolverChildren } = await supertest + .get(`/api/endpoint/resolver/${entityID}/children?legacyEndpointID=foo`) + .expect(200); + expect(body.nextChild).to.eql(null); + expect(body.childNodes).to.be.empty(); + }); }); - it('returns empty events with an invalid endpoint id', async () => { - const { body } = await supertest - .get(`/api/endpoint/resolver/${entityID}/children?legacyEndpointID=foo`) - .set(commonHeaders) - .expect(200); - expect(body.pagination.nextChild).to.eql(null); - expect(body.children).to.be.empty(); + describe('endpoint events', () => { + it('returns all children for the origin', async () => { + const { body }: { body: ResolverChildren } = await supertest + .get(`/api/endpoint/resolver/${tree.origin.id}/children?children=100`) + .expect(200); + // there are 2 levels in the children part of the tree and 3 nodes for each = + // 3 children for the origin + 3 children for each of the origin's children = 12 + expect(body.childNodes.length).to.eql(12); + // there will be 4 parents, the origin of the tree, and its 3 children + verifyChildren(body.childNodes, tree, 4, 3); + }); + + it('returns a single generation of children', async () => { + const { body }: { body: ResolverChildren } = await supertest + .get(`/api/endpoint/resolver/${tree.origin.id}/children?generations=1`) + .expect(200); + expect(body.childNodes.length).to.eql(3); + verifyChildren(body.childNodes, tree, 1, 3); + }); + + it('paginates the children of the origin node', async () => { + let { body }: { body: ResolverChildren } = await supertest + .get(`/api/endpoint/resolver/${tree.origin.id}/children?generations=1&children=1`) + .expect(200); + expect(body.childNodes.length).to.eql(1); + verifyChildren(body.childNodes, tree, 1, 1); + expect(body.nextChild).to.not.be(null); + + ({ body } = await supertest + .get( + `/api/endpoint/resolver/${tree.origin.id}/children?generations=1&afterChild=${body.nextChild}` + ) + .expect(200)); + expect(body.childNodes.length).to.eql(2); + verifyChildren(body.childNodes, tree, 1, 2); + expect(body.childNodes[0].nextChild).to.be(null); + expect(body.childNodes[1].nextChild).to.be(null); + }); + + it('paginates the children of different nodes', async () => { + let { body }: { body: ResolverChildren } = await supertest + .get(`/api/endpoint/resolver/${tree.origin.id}/children?generations=2&children=2`) + .expect(200); + // it should return 4 nodes total, 2 for each level + expect(body.childNodes.length).to.eql(4); + verifyChildren(body.childNodes, tree, 2); + expect(body.nextChild).to.not.be(null); + expect(body.childNodes[0].nextChild).to.not.be(null); + // the second child will not have any results returned for it so it should not have pagination set (the first) + // request to get its children should start at the beginning aka not passing any pagination parameter + expect(body.childNodes[1].nextChild).to.be(null); + + const firstChild = body.childNodes[0]; + + // get the 3rd child of the origin of the tree + ({ body } = await supertest + .get( + `/api/endpoint/resolver/${tree.origin.id}/children?generations=1&children=10&afterChild=${body.nextChild}` + ) + .expect(200)); + expect(body.childNodes.length).to.be(1); + verifyChildren(body.childNodes, tree, 1, 1); + expect(body.childNodes[0].nextChild).to.be(null); + + // get the 1 child of the origin of the tree's last child + ({ 
body } = await supertest + .get( + `/api/endpoint/resolver/${firstChild.entityID}/children?generations=1&children=10&afterChild=${firstChild.nextChild}` + ) + .expect(200)); + expect(body.childNodes.length).to.be(1); + verifyChildren(body.childNodes, tree, 1, 1); + expect(body.childNodes[0].nextChild).to.be(null); + }); }); }); - describe('tree endpoint', () => { - const endpointID = '5a0c957f-b8e7-4538-965e-57e8bb86ad3a'; - - it('returns ancestors, events, children, and current process lifecycle', async () => { - const { body } = await supertest - .get(`/api/endpoint/resolver/93933?legacyEndpointID=${endpointID}`) - .set(commonHeaders) - .expect(200); - expect(body.pagination.nextAncestor).to.equal(null); - expect(body.pagination.nextEvent).to.equal(null); - expect(body.pagination.nextChild).to.equal(null); - expect(body.children.length).to.equal(0); - expect(body.events.length).to.equal(0); - expect(body.lifecycle.length).to.equal(2); + describe('tree api', () => { + describe('legacy events', () => { + const endpointID = '5a0c957f-b8e7-4538-965e-57e8bb86ad3a'; + + it('returns ancestors, events, children, and current process lifecycle', async () => { + const { body }: { body: ResolverTree } = await supertest + .get(`/api/endpoint/resolver/93933?legacyEndpointID=${endpointID}`) + .expect(200); + expect(body.ancestry.nextAncestor).to.equal(null); + expect(body.relatedEvents.nextEvent).to.equal(null); + expect(body.children.nextChild).to.equal(null); + expect(body.children.childNodes.length).to.equal(0); + expect(body.relatedEvents.events.length).to.equal(0); + expect(body.lifecycle.length).to.equal(2); + }); + }); + + describe('endpoint events', () => { + it('returns a tree', async () => { + const { body }: { body: ResolverTree } = await supertest + .get( + `/api/endpoint/resolver/${tree.origin.id}?children=100&generations=3&ancestors=5&events=4` + ) + .expect(200); + + expect(body.children.nextChild).to.equal(null); + expect(body.children.childNodes.length).to.equal(12); + verifyChildren(body.children.childNodes, tree, 4, 3); + + expect(body.ancestry.nextAncestor).to.equal(null); + verifyAncestry(body.ancestry.ancestors, tree, true); + + expect(body.relatedEvents.nextEvent).to.equal(null); + compareArrays(tree.origin.relatedEvents, body.relatedEvents.events, true); + + compareArrays(tree.origin.lifecycle, body.lifecycle, true); + }); }); }); }); diff --git a/x-pack/test/api_integration/services/index.ts b/x-pack/test/api_integration/services/index.ts index 6dcc9bb291b02..687984340d7d6 100644 --- a/x-pack/test/api_integration/services/index.ts +++ b/x-pack/test/api_integration/services/index.ts @@ -24,6 +24,7 @@ import { InfraOpsSourceConfigurationProvider } from './infraops_source_configura import { InfraLogSourceConfigurationProvider } from './infra_log_source_configuration'; import { MachineLearningProvider } from './ml'; import { IngestManagerProvider } from './ingest_manager'; +import { ResolverGeneratorProvider } from './resolver'; export const services = { ...commonServices, @@ -43,4 +44,5 @@ export const services = { usageAPI: UsageAPIProvider, ml: MachineLearningProvider, ingestManager: IngestManagerProvider, + resolverGenerator: ResolverGeneratorProvider, }; diff --git a/x-pack/test/api_integration/services/resolver.ts b/x-pack/test/api_integration/services/resolver.ts new file mode 100644 index 0000000000000..b1e58a0a1a3d1 --- /dev/null +++ b/x-pack/test/api_integration/services/resolver.ts @@ -0,0 +1,69 @@ +/* + * Copyright Elasticsearch B.V. 
and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +import { + TreeOptions, + Tree, + EndpointDocGenerator, +} from '../../../plugins/siem/common/endpoint/generate_data'; +import { FtrProviderContext } from '../ftr_provider_context'; + +/** + * Options for building a resolver tree + */ +export interface Options extends TreeOptions { + /** + * Number of trees to generate. + */ + numTrees?: number; +} + +/** + * Structure containing the generated trees and the ES index they live in + */ +export interface GeneratedTrees { + trees: Tree[]; + index: string; +} + +export function ResolverGeneratorProvider({ getService }: FtrProviderContext) { + const client = getService('es'); + + return { + async createTrees( + options: Options, + eventsIndex: string = 'events-endpoint-1' + ): Promise<GeneratedTrees> { + const allTrees: Tree[] = []; + const generator = new EndpointDocGenerator(); + const numTrees = options.numTrees ?? 1; + for (let j = 0; j < numTrees; j++) { + const tree = generator.generateTree(options); + const body = tree.allEvents.reduce( + (array: Array<Record<string, unknown>>, doc) => ( + /** + * We're using data streams which require that a bulk request use `create` instead of `index`. + */ + array.push({ create: { _index: eventsIndex } }, doc), array + ), + [] + ); + // force a refresh here otherwise the documents might not be available when the tests search for them + await client.bulk({ body, refresh: 'true' }); + allTrees.push(tree); + } + return { trees: allTrees, index: eventsIndex }; + }, + async deleteTrees(trees: GeneratedTrees) { + /** + * The ingest manager handles creating the template for the endpoint's indices. It is using a V2 template + * with data streams. Data streams aren't included in the javascript elasticsearch client in kibana yet so we + * need to do raw requests here. Deleting a data stream is slightly different from deleting a regular index, + * which is why we're using _data_stream here. + */ + await client.transport.request({ method: 'DELETE', path: `_data_stream/${trees.index}` }); + }, + }; +}
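Taken together, the new generator service and the reworked routes compose as in the following sketch of an FTR test. This is a hypothetical illustration, not part of the PR: the test name, option values, and query parameters are invented, while the imports, the `resolverGenerator` service name, and the response shapes mirror the files added above.

```ts
import expect from '@kbn/expect';
import { FtrProviderContext } from '../../ftr_provider_context';
import { Options, GeneratedTrees } from '../../services/resolver';
import { ResolverTree } from '../../../../plugins/siem/common/endpoint/types';

// Hypothetical smoke test for the resolverGenerator service registered above.
export default function ({ getService }: FtrProviderContext) {
  const supertest = getService('supertest');
  const resolver = getService('resolverGenerator');

  describe('resolver generator service (sketch)', () => {
    let generated: GeneratedTrees;
    // A small deterministic tree: 2 ancestors above the origin and one
    // generation of exactly 2 children below it.
    const options: Options = {
      ancestors: 2,
      children: 2,
      generations: 1,
      alwaysGenMaxChildrenPerNode: true,
      numTrees: 1,
    };

    before(async () => {
      // bulk-creates the generated events in the events-endpoint-1 data stream
      generated = await resolver.createTrees(options);
    });

    after(async () => {
      // tears down the backing data stream with a raw _data_stream DELETE
      await resolver.deleteTrees(generated);
    });

    it('serves a tree rooted at the generated origin', async () => {
      const originID = generated.trees[0].origin.id;
      const { body }: { body: ResolverTree } = await supertest
        .get(`/api/endpoint/resolver/${originID}?children=10&generations=1&ancestors=5`)
        .expect(200);
      expect(body.entityID).to.eql(originID);
      expect(body.children.childNodes.length).to.eql(2);
    });
  });
}
```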