From f09f678fd1fb84b9c12ad6c76b1b7950f5febffa Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Manuel=20de=20la=20Pe=C3=B1a?= Date: Tue, 12 Jan 2021 18:38:47 +0100 Subject: [PATCH 01/13] chore: remove fleet --- .ci/.e2e-tests.yaml | 6 - .../compose/profiles/fleet/docker-compose.yml | 42 - .../centos-systemd/docker-compose.yml | 10 - .../debian-systemd/docker-compose.yml | 10 - .../services/elastic-agent/docker-compose.yml | 14 - e2e/README.md | 12 +- e2e/_suites/fleet/README.md | 131 -- .../fleet/configurations/kibana.config.yml | 19 - .../agent_endpoint_integration.feature | 38 - .../fleet/features/fleet_mode_agent.feature | 125 -- .../fleet/features/stand_alone_agent.feature | 34 - e2e/_suites/fleet/fleet.go | 1465 ----------------- e2e/_suites/fleet/ingest-manager_test.go | 312 ---- e2e/_suites/fleet/integrations.go | 344 ---- e2e/_suites/fleet/services.go | 626 ------- e2e/_suites/fleet/stand-alone.go | 298 ---- 16 files changed, 3 insertions(+), 3483 deletions(-) delete mode 100644 cli/config/compose/profiles/fleet/docker-compose.yml delete mode 100644 cli/config/compose/services/centos-systemd/docker-compose.yml delete mode 100644 cli/config/compose/services/debian-systemd/docker-compose.yml delete mode 100644 cli/config/compose/services/elastic-agent/docker-compose.yml delete mode 100644 e2e/_suites/fleet/README.md delete mode 100644 e2e/_suites/fleet/configurations/kibana.config.yml delete mode 100644 e2e/_suites/fleet/features/agent_endpoint_integration.feature delete mode 100644 e2e/_suites/fleet/features/fleet_mode_agent.feature delete mode 100644 e2e/_suites/fleet/features/stand_alone_agent.feature delete mode 100644 e2e/_suites/fleet/fleet.go delete mode 100644 e2e/_suites/fleet/ingest-manager_test.go delete mode 100644 e2e/_suites/fleet/integrations.go delete mode 100644 e2e/_suites/fleet/services.go delete mode 100644 e2e/_suites/fleet/stand-alone.go diff --git a/.ci/.e2e-tests.yaml b/.ci/.e2e-tests.yaml index 91e482936f..935da19caa 100644 --- 
a/.ci/.e2e-tests.yaml +++ b/.ci/.e2e-tests.yaml @@ -6,12 +6,6 @@ SUITES: tags: "filebeat" - suite: "helm" tags: "metricbeat" - - suite: "fleet" - tags: "agent_endpoint_integration" - - suite: "fleet" - tags: "stand_alone_agent" - - suite: "fleet" - tags: "fleet_mode_agent" - suite: "metricbeat" tags: "integrations && activemq" - suite: "metricbeat" diff --git a/cli/config/compose/profiles/fleet/docker-compose.yml b/cli/config/compose/profiles/fleet/docker-compose.yml deleted file mode 100644 index ce627f1b87..0000000000 --- a/cli/config/compose/profiles/fleet/docker-compose.yml +++ /dev/null @@ -1,42 +0,0 @@ -version: '2.3' -services: - elasticsearch: - healthcheck: - test: ["CMD", "curl", "-f", "-u", "elastic:changeme", "http://127.0.0.1:9200/"] - retries: 300 - interval: 1s - environment: - - ES_JAVA_OPTS=-Xms1g -Xmx1g - - network.host= - - transport.host=127.0.0.1 - - http.host=0.0.0.0 - - indices.id_field_data.enabled=true - - xpack.license.self_generated.type=trial - - xpack.security.enabled=true - - xpack.security.authc.api_key.enabled=true - - ELASTIC_USERNAME=elastic - - ELASTIC_PASSWORD=changeme - image: "docker.elastic.co/elasticsearch/elasticsearch:${stackVersion:-8.0.0-SNAPSHOT}" - ports: - - "9200:9200" - kibana: - depends_on: - elasticsearch: - condition: service_healthy - package-registry: - condition: service_healthy - healthcheck: - test: "curl -f http://localhost:5601/login | grep kbn-injected-metadata 2>&1 >/dev/null" - retries: 600 - interval: 1s - image: "docker.elastic.co/kibana/kibana:${stackVersion:-8.0.0-SNAPSHOT}" - ports: - - "5601:5601" - volumes: - - ${kibanaConfigPath}:/usr/share/kibana/config/kibana.yml - package-registry: - image: docker.elastic.co/package-registry/distribution:staging - healthcheck: - test: ["CMD", "curl", "-f", "http://localhost:8080"] - retries: 300 - interval: 1s diff --git a/cli/config/compose/services/centos-systemd/docker-compose.yml b/cli/config/compose/services/centos-systemd/docker-compose.yml deleted file 
mode 100644 index 22509d5857..0000000000 --- a/cli/config/compose/services/centos-systemd/docker-compose.yml +++ /dev/null @@ -1,10 +0,0 @@ -version: '2.3' -services: - centos-systemd: - image: centos/systemd:${centos_systemdTag:-latest} - container_name: ${centos_systemdContainerName} - entrypoint: "/usr/sbin/init" - privileged: true - volumes: - - ${centos_systemdAgentBinarySrcPath:-.}:${centos_systemdAgentBinaryTargetPath:-/tmp} - - /sys/fs/cgroup:/sys/fs/cgroup:ro diff --git a/cli/config/compose/services/debian-systemd/docker-compose.yml b/cli/config/compose/services/debian-systemd/docker-compose.yml deleted file mode 100644 index 0e839e7673..0000000000 --- a/cli/config/compose/services/debian-systemd/docker-compose.yml +++ /dev/null @@ -1,10 +0,0 @@ -version: '2.3' -services: - debian-systemd: - image: alehaa/debian-systemd:${debian_systemdTag:-stretch} - container_name: ${debian_systemdContainerName} - entrypoint: "/sbin/init" - privileged: true - volumes: - - ${debian_systemdAgentBinarySrcPath:-.}:${debian_systemdAgentBinaryTargetPath:-/tmp} - - /sys/fs/cgroup:/sys/fs/cgroup:ro diff --git a/cli/config/compose/services/elastic-agent/docker-compose.yml b/cli/config/compose/services/elastic-agent/docker-compose.yml deleted file mode 100644 index 3adeffe94f..0000000000 --- a/cli/config/compose/services/elastic-agent/docker-compose.yml +++ /dev/null @@ -1,14 +0,0 @@ -version: '2.3' -services: - elastic-agent: - image: docker.elastic.co/${elasticAgentDockerNamespace:-beats}/elastic-agent${elasticAgentDockerImageSuffix}:${elasticAgentTag:-8.0.0-SNAPSHOT} - container_name: ${elasticAgentContainerName} - depends_on: - elasticsearch: - condition: service_healthy - kibana: - condition: service_healthy - environment: - - "KIBANA_HOST=http://${kibanaHost:-kibana}:${kibanaPort:-5601}" - volumes: - - "${elasticAgentConfigFile}:/usr/share/elastic-agent/elastic-agent.yml" diff --git a/e2e/README.md b/e2e/README.md index 1a6915776d..9efb0ab9e3 100644 --- a/e2e/README.md +++ 
b/e2e/README.md @@ -116,12 +116,6 @@ We are going to enumerate the variables that will affect the product versions us >It's important to notice that the 7.9.x branch in **Fleet** test suite uses different source code for the communications with Kibana Fleet plugin, as API endpoints changed from 7.9 to 7.10, so there could be some combinations that are broken. See https://github.com/elastic/e2e-testing/pull/348 for further reference about these breaking changes. > Related to this compatibility matrix too, it's also remarkable that Kibana **Fleet** plugin should not allow to enroll an agent with a version higher than kibana (See https://github.com/elastic/kibana/blob/fed9a4fddcc0087ee9eca6582a2a84e001890f08/x-pack/test/fleet_api_integration/apis/agents/enroll.ts#L99). - -#### Fleet -- `ELASTIC_AGENT_VERSION`. Set this environment variable to the proper version of the Elastic Agent to be used in the current execution. Default: See https://github.com/elastic/e2e-testing/blob/0446248bae1ff604219735998841a21a7576bfdd/.ci/Jenkinsfile#L36 -- `ELASTIC_AGENT_DOWNLOAD_URL`. Set this environment variable if you know the bucket URL for an Elastic Agent artifact generated by the CI, i.e. for a pull request. It will take precedence over the `ELASTIC_AGENT_VERSION` variable. Default empty: See https://github.com/elastic/e2e-testing/blob/0446248bae1ff604219735998841a21a7576bfdd/.ci/Jenkinsfile#L35 -- `ELASTIC_AGENT_STALE_VERSION`. Set this environment variable to the proper version of the Elastic Agent to be used in the upgrade tests, representing the version to be upgraded. Default: See https://github.com/elastic/e2e-testing/blob/b8d0cb09d575f90f447fe3331b6df0a185c01c89/.ci/Jenkinsfile#L38 - #### Helm charts - `HELM_CHART_VERSION`. Set this environment variable to the proper version of the Helm charts to be used in the current execution. 
Default: See https://github.com/elastic/e2e-testing/blob/0446248bae1ff604219735998841a21a7576bfdd/.ci/Jenkinsfile#L43 - `HELM_VERSION`. Set this environment variable to the proper version of Helm to be used in the current execution. Default: See https://github.com/elastic/e2e-testing/blob/0446248bae1ff604219735998841a21a7576bfdd/.ci/Jenkinsfile#L44 @@ -148,10 +142,10 @@ This example will run the Fleet tests for the 8.0.0-SNAPSHOT stack with the rele # Use the proper branch git checkout master # Run the tests for a specific branch -SUITE="fleet" \ +SUITE="metricbeat" \ TIMEOUT_FACTOR=3 LOG_LEVEL=TRACE \ - TAGS="fleet_mode" \ - ELASTIC_AGENT_VERSION="7.10.1" \ + TAGS="integrations && redis" \ + METRICBEAT_VERSION="6.8.0" \ make -C e2e functional-test ``` diff --git a/e2e/_suites/fleet/README.md b/e2e/_suites/fleet/README.md deleted file mode 100644 index a7fa2834fe..0000000000 --- a/e2e/_suites/fleet/README.md +++ /dev/null @@ -1,131 +0,0 @@ -# Fleet End-To-End tests - -## Motivation - -Our goal is for the Fleet team to execute this automated e2e test suite while developing the product. The tests in this folder assert that the use cases (or scenarios) defined in the `features` directory are behaving as expected. - -## How do the tests work? - -At the topmost level, the test framework uses a BDD framework written in Go, where we set -the expected behavior of use cases in a feature file using Gherkin, and implementing the steps in Go code. -The provisining of services is accomplish using Docker Compose and the [testcontainers-go](https://github.com/testcontainers/testcontainers-go) library. - -The tests will follow this general high-level approach: - -1. Install runtime dependencies as Docker containers via Docker Compose, happening at before the test suite runs. These runtime dependencies are defined in a specific `profile` for Fleet, in the form of a `docker-compose.yml` file. -1. Execute BDD steps representing each scenario. 
Each step will return an Error if the behavior is not satisfied, marking the step and the scenario as failed, or will return `nil`. - -## Known Limitations - -Because this framework uses Docker as the provisioning tool, all the services are based on Linux containers. That's why we consider this tool very suitable while developing the product, but would not cover the entire support matrix for the product: Linux, Windows, Mac, ARM, etc. - -For Windows or other platform support, we should build Windows images and containers or, given the cross-platform nature of Golang, should add the building blocks in the test framework to run the code in the ephemeral CI workers for the underlaying platform. - -### Diagnosing test failures - -The first step in determining the exact failure is to try and reproduce the test run locally, ideally using the DEBUG log level to enhance the log output. Once you've done that, look at the output from the test run. - -#### (For Mac) Docker is not able to save files in a temporary directory - -It's important to configure `Docker for Mac` to allow it accessing the `/var/folders` directory, as this framework uses Mac's default temporary directory for storing tempoorary files. - -To change it, please use Docker UI, go to `Preferences > Resources > File Sharing`, and add there `/var/folders` to the list of paths that can be mounted into Docker containers. For more information, please read https://docs.docker.com/docker-for-mac/#file-sharing. - -### Running the tests - -1. Clone this repository, say into a folder named `e2e-testing`. - - ``` shell - git clone git@github.com:elastic/e2e-testing.git - ``` - -2. Configure the version of the product you want to test (Optional). 
- -This is an example of the optional configuration: - - ```shell - # There should be a Docker image for the runtime dependencies (elasticsearch, kibana, package registry) - export STACK_VERSION=8.0.0-SNAPSHOT - # (Fleet mode) This environment variable will use a fixed version of the Elastic agent binary, obtained from - # https://artifacts-api.elastic.co/v1/search/8.0.0-SNAPSHOT/elastic-agent - export ELASTIC_AGENT_DOWNLOAD_URL="https://snapshots.elastic.co/8.0.0-59098054/downloads/beats/elastic-agent/elastic-agent-8.0.0-SNAPSHOT-linux-x86_64.tar.gz" - # (Fleet mode) This environment variable will use the snapshots produced by Beats CI. If the above variable - # is set, this variable will take no effect - export BEATS_USE_CI_SNAPSHOTS="true" - # (Stand-Alone mode) This environment variable will use the its value as the Docker tag produced by Beats CI (Please look up here: https://container-library.elastic.co/r/observability-ci/elastic-agent). Here you have two examples for tags: - export ELASTIC_AGENT_VERSION="pr-20356" - # or - export ELASTIC_AGENT_VERSION="78a762c76080aafa34c52386341b590dac24e2df" - ``` - -3. Define the proper Docker images to be used in tests (Optional). - - Update the Docker compose files with the local version of the images you want to use. - - >TBD: There is an initiative to automate this process to build the Docker image for a PR (or the local workspace) before running the tests, so the image is ready. - -4. Install dependencies. - - - Install Go: `https://golang.org/doc/install` _(The CI uses [GVM](https://github.com/andrewkroh/gvm))_ - - Install godog (from project's root directory): `make -C e2e install-godog` - -5. Run the tests. - - If you want to run the tests in Developer mode, which means reusing bakend services between test runs, please set this environment variable first: - - ```shell - # It won't tear down the backend services (ES, Kibana, Package Registry) or agent services after a test suite. 
- export DEVELOPER_MODE=true - ``` - - ```shell - cd e2e/_suites/fleet - OP_LOG_LEVEL=DEBUG godog - ``` - - The tests will take a few minutes to run, spinning up a few Docker containers representing the various products in this framework and performing the test steps outlined earlier. - - As the tests are running they will output the results in your terminal console. This will be quite verbose and you can ignore most of it until the tests finish. Then inspect at the output of the last play that ran and failed. On the contrary, you could use a different log level for the `OP_LOG_LEVEL` variable, being it possible to use `DEBUG`, `INFO (default)`, `WARN`, `ERROR`, `FATAL` as log levels. - -### Tests fail because the product could not be configured or run correctly - -This type of failure usually indicates that code for these tests itself needs to be changed. - -See the sections below on how to run the tests locally. - -### One or more scenarios fail - -Check if the scenario has an annotation/tag supporting the test runner to filter the execution by that tag. Godog will run those scenarios. For more information about tags: https://github.com/cucumber/godog/#tags - - ```shell - OP_LOG_LEVEL=DEBUG godog -t '@annotation' - ``` - -Example: - - ```shell - OP_LOG_LEVEL=DEBUG godog -t '@stand_alone_mode' - ``` - -### Setup failures - -Sometimes the tests could fail to configure or start a product such as Metricbeat, Elasticsearch, etc. To determine why -this happened, look at your terminal log in DEBUG mode. If a `docker-compose.yml` file is not present please execute this command: - -```shell -## Will remove tool's existing default files and will update them with the bundled ones. 
-make clean-workspace -``` - -If you see the docker images are outdated, please execute this command: - -```shell -## Will refresh stack images -make clean-docker -``` - -Note what you find and file a bug in the `elastic/e2e-testing` repository, requiring a fix to the Fleet suite to properly configure and start the product. - -### I cannot move on - -Please open an issue here: https://github.com/elastic/e2e-testing/issues/new diff --git a/e2e/_suites/fleet/configurations/kibana.config.yml b/e2e/_suites/fleet/configurations/kibana.config.yml deleted file mode 100644 index e0fd646242..0000000000 --- a/e2e/_suites/fleet/configurations/kibana.config.yml +++ /dev/null @@ -1,19 +0,0 @@ ---- -server.name: kibana -server.host: "0.0.0.0" - -telemetry.enabled: false - -elasticsearch.hosts: [ "http://elasticsearch:9200" ] -elasticsearch.username: elastic -elasticsearch.password: changeme -monitoring.ui.container.elasticsearch.enabled: true - -xpack.encryptedSavedObjects.encryptionKey: "12345678901234567890123456789012" - -xpack.fleet.enabled: true -xpack.fleet.registryUrl: http://package-registry:8080 -xpack.fleet.agents.enabled: true -xpack.fleet.agents.elasticsearch.host: http://elasticsearch:9200 -xpack.fleet.agents.kibana.host: http://kibana:5601 -xpack.fleet.agents.tlsCheckDisabled: true diff --git a/e2e/_suites/fleet/features/agent_endpoint_integration.feature b/e2e/_suites/fleet/features/agent_endpoint_integration.feature deleted file mode 100644 index 0f692c1ad8..0000000000 --- a/e2e/_suites/fleet/features/agent_endpoint_integration.feature +++ /dev/null @@ -1,38 +0,0 @@ -@agent_endpoint_integration -Feature: Agent Endpoint Integration - Scenarios for Agent to deploy Endpoint and sending data to Fleet and Elasticsearch. 
- -@deploy-endpoint-with-agent -Scenario: Adding the Endpoint Integration to an Agent makes the host to show in Security App - Given a "centos" agent is deployed to Fleet with "tar" installer - And the agent is listed in Fleet as "online" - When the "Endpoint Security" integration is "added" in the policy - Then the "Endpoint Security" datasource is shown in the policy as added - And the host name is shown in the Administration view in the Security App as "online" - -@endpoint-policy-check -Scenario: Deploying an Endpoint makes policies to appear in the Security App - When an Endpoint is successfully deployed with a "centos" Agent using "tar" installer - Then the policy response will be shown in the Security App - -@set-policy-and-check-changes -Scenario: Changing an Agent policy is reflected in the Security App - Given an Endpoint is successfully deployed with a "centos" Agent using "tar" installer - When the policy is updated to have "malware" in "detect" mode - Then the policy will reflect the change in the Security App - -@deploy-endpoint-then-unenroll-agent -Scenario: Un-enrolling Elastic Agent stops Elastic Endpoint - Given an Endpoint is successfully deployed with a "centos" Agent using "tar" installer - When the agent is un-enrolled - Then the agent is listed in Fleet as "inactive" - And the host name is not shown in the Administration view in the Security App - And the "elastic-endpoint" process is in the "stopped" state on the host - -@deploy-endpoint-then-remove-it-from-policy -Scenario: Removing Endpoint from Agent policy stops the connected Endpoint - Given an Endpoint is successfully deployed with a "centos" Agent using "tar" installer - When the "Endpoint Security" integration is "removed" in the policy - Then the agent is listed in Fleet as "online" - But the host name is not shown in the Administration view in the Security App - And the "elastic-endpoint" process is in the "stopped" state on the host diff --git 
a/e2e/_suites/fleet/features/fleet_mode_agent.feature b/e2e/_suites/fleet/features/fleet_mode_agent.feature deleted file mode 100644 index edc457fd9c..0000000000 --- a/e2e/_suites/fleet/features/fleet_mode_agent.feature +++ /dev/null @@ -1,125 +0,0 @@ -@fleet_mode_agent -Feature: Fleet Mode Agent - Scenarios for the Agent in Fleet mode connecting to Fleet application. - -@install -Scenario Outline: Deploying the agent - Given a "" agent is deployed to Fleet with "tar" installer - When the "elastic-agent" process is in the "started" state on the host - Then the "filebeat" process is in the "started" state on the host - And the "metricbeat" process is in the "started" state on the host - And the agent is listed in Fleet as "online" - And system package dashboards are listed in Fleet -Examples: -| os | -| centos | -| debian | - -@enroll -Scenario Outline: Deploying the agent with enroll and then run on rpm and deb - Given a "" agent is deployed to Fleet with "systemd" installer - When the "elastic-agent" process is in the "started" state on the host - Then the "filebeat" process is in the "started" state on the host - And the "metricbeat" process is in the "started" state on the host - And the agent is listed in Fleet as "online" - And system package dashboards are listed in Fleet -Examples: -| os | -| centos | -| debian | - -@stop-agent -Scenario Outline: Stopping the agent stops backend processes - Given a "" agent is deployed to Fleet with "tar" installer - When the "elastic-agent" process is "stopped" on the host - Then the "filebeat" process is in the "stopped" state on the host - And the "metricbeat" process is in the "stopped" state on the host -Examples: -| os | -| centos | -| debian | - -# @upgrade-agent -@skip -Scenario Outline: Upgrading the installed agent - Given a "" agent "stale" is deployed to Fleet with "tar" installer - And certs for "" are installed - When agent is upgraded to version "latest" - Then agent is in version "latest" -Examples: -| os | 
-| debian | - -@restart-agent -Scenario Outline: Restarting the installed agent - Given a "" agent is deployed to Fleet with "tar" installer - When the "elastic-agent" process is "restarted" on the host - Then the "filebeat" process is in the "started" state on the host - And the "metricbeat" process is in the "started" state on the host - And the agent is listed in Fleet as "online" -Examples: -| os | -| centos | -| debian | - -@restart-host -Scenario Outline: Restarting the host with persistent agent restarts backend processes - Given a "" agent is deployed to Fleet with "tar" installer - When the host is restarted - Then the "elastic-agent" process is in the "started" state on the host - And the "filebeat" process is in the "started" state on the host - And the "metricbeat" process is in the "started" state on the host -Examples: -| os | -| centos | -| debian | - -@unenroll -Scenario Outline: Un-enrolling the agent - Given a "" agent is deployed to Fleet with "tar" installer - When the agent is un-enrolled - Then the "elastic-agent" process is in the "started" state on the host - And the agent is listed in Fleet as "inactive" - And the "filebeat" process is in the "stopped" state on the host - And the "metricbeat" process is in the "stopped" state on the host -Examples: -| os | -| centos | -| debian | - -@reenroll -Scenario Outline: Re-enrolling the agent - Given a "" agent is deployed to Fleet with "tar" installer - And the agent is un-enrolled - And the "elastic-agent" process is "stopped" on the host - When the agent is re-enrolled on the host - Then the "elastic-agent" process is "started" on the host - And the agent is listed in Fleet as "online" -Examples: -| os | -| centos | -| debian | - -@revoke-token -Scenario Outline: Revoking the enrollment token for the agent - Given a "" agent is deployed to Fleet with "tar" installer - When the enrollment token is revoked - Then an attempt to enroll a new agent fails -Examples: -| os | -| centos | -| debian | - 
-@uninstall-host -Scenario Outline: Un-installing the installed agent - Given a "" agent is deployed to Fleet with "tar" installer - When the "elastic-agent" process is "uninstalled" on the host - Then the "elastic-agent" process is in the "stopped" state on the host - And the "filebeat" process is in the "stopped" state on the host - And the "metricbeat" process is in the "stopped" state on the host - And the file system Agent folder is empty - And the agent is listed in Fleet as "offline" -Examples: -| os | -| centos | -| debian | diff --git a/e2e/_suites/fleet/features/stand_alone_agent.feature b/e2e/_suites/fleet/features/stand_alone_agent.feature deleted file mode 100644 index bdc78beeec..0000000000 --- a/e2e/_suites/fleet/features/stand_alone_agent.feature +++ /dev/null @@ -1,34 +0,0 @@ -@stand_alone_agent -Feature: Stand-alone Agent - Scenarios for a standalone mode Elastic Agent in Fleet, where an Elasticseach - and a Kibana instances are already provisioned, so that the Agent is able to communicate - with them - -@start-agent -Scenario Outline: Starting the agent starts backend processes - When a "" stand-alone agent is deployed - Then the "filebeat" process is in the "started" state on the host - And the "metricbeat" process is in the "started" state on the host -Examples: -| image | -| default | -| ubi8 | - -@deploy-stand-alone -Scenario Outline: Deploying a stand-alone agent - When a "" stand-alone agent is deployed - Then there is new data in the index from agent -Examples: -| image | -| default | -| ubi8 | - -@stop-agent -Scenario Outline: Stopping the agent container stops data going into ES - Given a "" stand-alone agent is deployed - When the "elastic-agent" docker container is stopped - Then there is no new data in the index after agent shuts down -Examples: -| image | -| default | -| ubi8 | diff --git a/e2e/_suites/fleet/fleet.go b/e2e/_suites/fleet/fleet.go deleted file mode 100644 index 8b6491fe8f..0000000000 --- a/e2e/_suites/fleet/fleet.go 
+++ /dev/null @@ -1,1465 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package main - -import ( - "fmt" - "strings" - "time" - - "github.com/Jeffail/gabs/v2" - "github.com/cenkalti/backoff/v4" - "github.com/cucumber/godog" - "github.com/elastic/e2e-testing/cli/services" - curl "github.com/elastic/e2e-testing/cli/shell" - "github.com/elastic/e2e-testing/e2e" - "github.com/google/uuid" - "github.com/pkg/errors" - log "github.com/sirupsen/logrus" -) - -const fleetAgentsURL = kibanaBaseURL + "/api/fleet/agents" -const fleetAgentEventsURL = kibanaBaseURL + "/api/fleet/agents/%s/events" -const fleetAgentsUnEnrollURL = kibanaBaseURL + "/api/fleet/agents/%s/unenroll" -const fleetAgentUpgradeURL = kibanaBaseURL + "/api/fleet/agents/%s/upgrade" -const fleetEnrollmentTokenURL = kibanaBaseURL + "/api/fleet/enrollment-api-keys" -const fleetSetupURL = kibanaBaseURL + "/api/fleet/agents/setup" -const ingestManagerAgentPoliciesURL = kibanaBaseURL + "/api/fleet/agent_policies" -const ingestManagerDataStreamsURL = kibanaBaseURL + "/api/fleet/data_streams" - -const actionADDED = "added" -const actionREMOVED = "removed" - -// FleetTestSuite represents the scenarios for Fleet-mode -type FleetTestSuite struct { - Image string // base image used to install the agent - InstallerType string - Installers map[string]ElasticAgentInstaller - Cleanup bool - PolicyID string // will be used to manage tokens - CurrentToken string // current enrollment token - CurrentTokenID string // current enrollment tokenID - Hostname string // the hostname of the container - // integrations - Integration IntegrationPackage // the installed integration - PolicyUpdatedAt string // the moment the policy was updated -} - -// afterScenario 
destroys the state created by a scenario -func (fts *FleetTestSuite) afterScenario() { - serviceManager := services.NewServiceManager() - - serviceName := fts.Image - - if log.IsLevelEnabled(log.DebugLevel) { - installer := fts.getInstaller() - _ = installer.getElasticAgentLogs(fts.Hostname) - - err := installer.UninstallFn() - if err != nil { - log.Error("Could not uninstall the agent") - } - } - - err := fts.unenrollHostname(true) - if err != nil { - log.WithFields(log.Fields{ - "err": err, - "hostname": fts.Hostname, - }).Warn("The agentIDs for the hostname could not be unenrolled") - } - - if !developerMode { - _ = serviceManager.RemoveServicesFromCompose(FleetProfileName, []string{serviceName + "-systemd"}, profileEnv) - } else { - log.WithField("service", serviceName).Info("Because we are running in development mode, the service won't be stopped") - } - - err = fts.removeToken() - if err != nil { - log.WithFields(log.Fields{ - "err": err, - "tokenID": fts.CurrentTokenID, - }).Warn("The enrollment token could not be deleted") - } - - err = deleteIntegrationFromPolicy(fts.Integration, fts.PolicyID) - if err != nil { - log.WithFields(log.Fields{ - "err": err, - "packageConfigID": fts.Integration.packageConfigID, - "configurationID": fts.PolicyID, - }).Warn("The integration could not be deleted from the configuration") - } - - // clean up fields - fts.CurrentTokenID = "" - fts.Image = "" - fts.Hostname = "" -} - -// beforeScenario creates the state needed by a scenario -func (fts *FleetTestSuite) beforeScenario() { - fts.Cleanup = false - - // create policy with system monitoring enabled - defaultPolicy, err := getAgentDefaultPolicy() - if err != nil { - log.WithFields(log.Fields{ - "err": err, - }).Warn("The default policy could not be obtained") - - return - } - - fts.PolicyID = defaultPolicy.Path("id").Data().(string) -} - -func (fts *FleetTestSuite) contributeSteps(s *godog.Suite) { - s.Step(`^a "([^"]*)" agent is deployed to Fleet with "([^"]*)" installer$`, 
fts.anAgentIsDeployedToFleetWithInstaller) - s.Step(`^a "([^"]*)" agent "([^"]*)" is deployed to Fleet with "([^"]*)" installer$`, fts.anStaleAgentIsDeployedToFleetWithInstaller) - s.Step(`^agent is in version "([^"]*)"$`, fts.agentInVersion) - s.Step(`^agent is upgraded to version "([^"]*)"$`, fts.anAgentIsUpgraded) - s.Step(`^the agent is listed in Fleet as "([^"]*)"$`, fts.theAgentIsListedInFleetWithStatus) - s.Step(`^the host is restarted$`, fts.theHostIsRestarted) - s.Step(`^system package dashboards are listed in Fleet$`, fts.systemPackageDashboardsAreListedInFleet) - s.Step(`^the agent is un-enrolled$`, fts.theAgentIsUnenrolled) - s.Step(`^the agent is re-enrolled on the host$`, fts.theAgentIsReenrolledOnTheHost) - s.Step(`^the enrollment token is revoked$`, fts.theEnrollmentTokenIsRevoked) - s.Step(`^an attempt to enroll a new agent fails$`, fts.anAttemptToEnrollANewAgentFails) - s.Step(`^the "([^"]*)" process is "([^"]*)" on the host$`, fts.processStateChangedOnTheHost) - s.Step(`^the file system Agent folder is empty$`, fts.theFileSystemAgentFolderIsEmpty) - s.Step(`^certs for "([^"]*)" are installed$`, fts.installCerts) - - // endpoint steps - s.Step(`^the "([^"]*)" integration is "([^"]*)" in the policy$`, fts.theIntegrationIsOperatedInThePolicy) - s.Step(`^the "([^"]*)" datasource is shown in the policy as added$`, fts.thePolicyShowsTheDatasourceAdded) - s.Step(`^the host name is shown in the Administration view in the Security App as "([^"]*)"$`, fts.theHostNameIsShownInTheAdminViewInTheSecurityApp) - s.Step(`^the host name is not shown in the Administration view in the Security App$`, fts.theHostNameIsNotShownInTheAdminViewInTheSecurityApp) - s.Step(`^an Endpoint is successfully deployed with a "([^"]*)" Agent using "([^"]*)" installer$`, fts.anEndpointIsSuccessfullyDeployedWithAgentAndInstalller) - s.Step(`^the policy response will be shown in the Security App$`, fts.thePolicyResponseWillBeShownInTheSecurityApp) - s.Step(`^the policy is updated to 
have "([^"]*)" in "([^"]*)" mode$`, fts.thePolicyIsUpdatedToHaveMode) - s.Step(`^the policy will reflect the change in the Security App$`, fts.thePolicyWillReflectTheChangeInTheSecurityApp) -} - -func (fts *FleetTestSuite) anStaleAgentIsDeployedToFleetWithInstaller(image, version, installerType string) error { - agentVersionBackup := agentVersion - defer func() { agentVersion = agentVersionBackup }() - - switch version { - case "stale": - version = agentStaleVersion - case "latest": - version = agentVersion - default: - version = agentStaleVersion - } - - agentVersion = version - - // prepare installer for stale version - if agentVersion != agentVersionBackup { - i := GetElasticAgentInstaller(image, installerType) - installerType = fmt.Sprintf("%s-%s", installerType, version) - fts.Installers[fmt.Sprintf("%s-%s", image, installerType)] = i - } - - return fts.anAgentIsDeployedToFleetWithInstaller(image, installerType) -} - -func (fts *FleetTestSuite) installCerts(targetOS string) error { - installer := fts.getInstaller() - if installer.InstallCertsFn == nil { - return errors.New("no installer found") - } - - return installer.InstallCertsFn() -} - -func (fts *FleetTestSuite) anAgentIsUpgraded(desiredVersion string) error { - switch desiredVersion { - case "stale": - desiredVersion = agentStaleVersion - case "latest": - desiredVersion = agentVersion - default: - desiredVersion = agentVersion - } - - return fts.upgradeAgent(desiredVersion) -} - -func (fts *FleetTestSuite) agentInVersion(version string) error { - switch version { - case "stale": - version = agentStaleVersion - case "latest": - version = agentVersion - } - - agentInVersionFn := func() error { - agentID, err := getAgentID(fts.Hostname) - if err != nil { - return err - } - - r := createDefaultHTTPRequest(fleetAgentsURL + "/" + agentID) - body, err := curl.Get(r) - if err != nil { - log.WithFields(log.Fields{ - "body": body, - "error": err, - "url": r.GetURL(), - }).Error("Could not get agent in Fleet") - 
return err - } - - jsonResponse, err := gabs.ParseJSON([]byte(body)) - - retrievedVersion := jsonResponse.Path("item.local_metadata.elastic.agent.version").Data().(string) - if isSnapshot := jsonResponse.Path("item.local_metadata.elastic.agent.snapshot").Data().(bool); isSnapshot { - retrievedVersion += "-SNAPSHOT" - } - - if retrievedVersion != version { - return fmt.Errorf("version mismatch required '%s' retrieved '%s'", version, retrievedVersion) - } - - return nil - } - - maxTimeout := time.Duration(timeoutFactor) * time.Minute * 2 - exp := e2e.GetExponentialBackOff(maxTimeout) - - return backoff.Retry(agentInVersionFn, exp) -} - -// supported installers: tar, systemd -func (fts *FleetTestSuite) anAgentIsDeployedToFleetWithInstaller(image string, installerType string) error { - log.WithFields(log.Fields{ - "image": image, - "installer": installerType, - }).Trace("Deploying an agent to Fleet with base image") - - fts.Image = image - fts.InstallerType = installerType - - installer := fts.getInstaller() - - profile := installer.profile // name of the runtime dependencies compose file - - serviceName := ElasticAgentServiceName // name of the service - containerName := fmt.Sprintf("%s_%s_%s_%d", profile, fts.Image+"-systemd", serviceName, 1) // name of the container - - uuid := uuid.New().String() - - // enroll the agent with a new token - tokenJSONObject, err := createFleetToken("Test token for "+uuid, fts.PolicyID) - if err != nil { - return err - } - fts.CurrentToken = tokenJSONObject.Path("api_key").Data().(string) - fts.CurrentTokenID = tokenJSONObject.Path("id").Data().(string) - - err = deployAgentToFleet(installer, containerName, fts.CurrentToken) - fts.Cleanup = true - if err != nil { - return err - } - - // the installation process for TAR includes the enrollment - if installer.installerType != "tar" { - err = installer.EnrollFn(fts.CurrentToken) - if err != nil { - return err - } - } - - // get container hostname once - hostname, err := 
getContainerHostname(containerName) - if err != nil { - return err - } - fts.Hostname = hostname - - return err -} - -func (fts *FleetTestSuite) getInstaller() ElasticAgentInstaller { - return fts.Installers[fts.Image+"-"+fts.InstallerType] -} - -func (fts *FleetTestSuite) processStateChangedOnTheHost(process string, state string) error { - profile := FleetProfileName - - installer := fts.getInstaller() - - serviceName := installer.service // name of the service - - if state == "started" { - return systemctlRun(profile, installer.image, serviceName, "start") - } else if state == "restarted" { - return systemctlRun(profile, installer.image, serviceName, "restart") - } else if state == "uninstalled" { - return installer.UninstallFn() - } else if state != "stopped" { - return godog.ErrPending - } - - log.WithFields(log.Fields{ - "service": serviceName, - "process": process, - }).Trace("Stopping process on the service") - - err := systemctlRun(profile, installer.image, serviceName, "stop") - if err != nil { - log.WithFields(log.Fields{ - "action": state, - "error": err, - "service": serviceName, - "process": process, - }).Error("Could not stop process on the host") - - return err - } - - // name of the container for the service: - // we are using the Docker client instead of docker-compose - // because it does not support returning the output of a - // command: it simply returns error level - containerName := fmt.Sprintf("%s_%s_%s_%d", profile, fts.Image+"-systemd", ElasticAgentServiceName, 1) - return checkProcessStateOnTheHost(containerName, process, "stopped") -} - -func (fts *FleetTestSuite) setup() error { - log.Trace("Creating Fleet setup") - - err := createFleetConfiguration() - if err != nil { - return err - } - - err = checkFleetConfiguration() - if err != nil { - return err - } - - return nil -} - -func (fts *FleetTestSuite) theAgentIsListedInFleetWithStatus(desiredStatus string) error { - log.Tracef("Checking if agent is listed in Fleet as %s", 
desiredStatus) - - maxTimeout := time.Duration(timeoutFactor) * time.Minute * 2 - retryCount := 1 - - exp := e2e.GetExponentialBackOff(maxTimeout) - - agentOnlineFn := func() error { - agentID, err := getAgentID(fts.Hostname) - if err != nil { - retryCount++ - return err - } - - if agentID == "" { - // the agent is not listed in Fleet - if desiredStatus == "offline" || desiredStatus == "inactive" { - log.WithFields(log.Fields{ - "isAgentInStatus": isAgentInStatus, - "elapsedTime": exp.GetElapsedTime(), - "hostname": fts.Hostname, - "retries": retryCount, - "status": desiredStatus, - }).Info("The Agent is not present in Fleet, as expected") - return nil - } else if desiredStatus == "online" { - retryCount++ - return fmt.Errorf("The agent is not present in Fleet, but it should") - } - } - - isAgentInStatus, err := isAgentInStatus(agentID, desiredStatus) - if err != nil || !isAgentInStatus { - if err == nil { - err = fmt.Errorf("The Agent is not in the %s status yet", desiredStatus) - } - - log.WithFields(log.Fields{ - "agentID": agentID, - "isAgentInStatus": isAgentInStatus, - "elapsedTime": exp.GetElapsedTime(), - "hostname": fts.Hostname, - "retry": retryCount, - "status": desiredStatus, - }).Warn(err.Error()) - - retryCount++ - - return err - } - - log.WithFields(log.Fields{ - "isAgentInStatus": isAgentInStatus, - "elapsedTime": exp.GetElapsedTime(), - "hostname": fts.Hostname, - "retries": retryCount, - "status": desiredStatus, - }).Info("The Agent is in the desired status") - return nil - } - - err := backoff.Retry(agentOnlineFn, exp) - if err != nil { - return err - } - - return nil -} - -func (fts *FleetTestSuite) theFileSystemAgentFolderIsEmpty() error { - installer := fts.getInstaller() - - profile := installer.profile // name of the runtime dependencies compose file - - // name of the container for the service: - // we are using the Docker client instead of docker-compose - // because it does not support returning the output of a - // command: it simply 
returns error level - containerName := fmt.Sprintf("%s_%s_%s_%d", profile, fts.Image+"-systemd", ElasticAgentServiceName, 1) - - content, err := installer.listElasticAgentWorkingDirContent(containerName) - if err != nil { - return err - } - - if strings.Contains(content, "No such file or directory") { - return nil - } - - return fmt.Errorf("The file system directory is not empty") -} - -func (fts *FleetTestSuite) theHostIsRestarted() error { - serviceManager := services.NewServiceManager() - - installer := fts.getInstaller() - - profile := installer.profile // name of the runtime dependencies compose file - image := installer.image // image of the service - service := installer.service // name of the service - - composes := []string{ - profile, // profile name - image, // service - } - - err := serviceManager.RunCommand(profile, composes, []string{"restart", service}, profileEnv) - if err != nil { - log.WithFields(log.Fields{ - "image": image, - "service": service, - }).Error("Could not restart the service") - return err - } - - log.WithFields(log.Fields{ - "image": image, - "service": service, - }).Debug("The service has been restarted") - return nil -} - -func (fts *FleetTestSuite) systemPackageDashboardsAreListedInFleet() error { - log.Trace("Checking system Package dashboards in Fleet") - - dataStreamsCount := 0 - maxTimeout := time.Duration(timeoutFactor) * time.Minute - retryCount := 1 - - exp := e2e.GetExponentialBackOff(maxTimeout) - - countDataStreamsFn := func() error { - dataStreams, err := getDataStreams() - if err != nil { - log.WithFields(log.Fields{ - "retry": retryCount, - "elapsedTime": exp.GetElapsedTime(), - }).Warn(err.Error()) - - retryCount++ - - return err - } - - count := len(dataStreams.Children()) - if count == 0 { - err = fmt.Errorf("There are no datastreams yet") - - log.WithFields(log.Fields{ - "retry": retryCount, - "dataStreams": count, - "elapsedTime": exp.GetElapsedTime(), - }).Warn(err.Error()) - - retryCount++ - - return err - } - 
- log.WithFields(log.Fields{ - "elapsedTime": exp.GetElapsedTime(), - "datastreams": count, - "retries": retryCount, - }).Info("Datastreams are present") - dataStreamsCount = count - return nil - } - - err := backoff.Retry(countDataStreamsFn, exp) - if err != nil { - return err - } - - if dataStreamsCount == 0 { - err = fmt.Errorf("There are no datastreams. We expected to have more than one") - log.Error(err.Error()) - return err - } - - return nil -} - -func (fts *FleetTestSuite) theAgentIsUnenrolled() error { - return fts.unenrollHostname(false) -} - -func (fts *FleetTestSuite) theAgentIsReenrolledOnTheHost() error { - log.Trace("Re-enrolling the agent on the host with same token") - - installer := fts.getInstaller() - - err := installer.EnrollFn(fts.CurrentToken) - if err != nil { - return err - } - - return nil -} - -func (fts *FleetTestSuite) theEnrollmentTokenIsRevoked() error { - log.WithFields(log.Fields{ - "token": fts.CurrentToken, - "tokenID": fts.CurrentTokenID, - }).Trace("Revoking enrollment token") - - err := fts.removeToken() - if err != nil { - return err - } - - log.WithFields(log.Fields{ - "token": fts.CurrentToken, - "tokenID": fts.CurrentTokenID, - }).Debug("Token was revoked") - - return nil -} - -func (fts *FleetTestSuite) thePolicyShowsTheDatasourceAdded(packageName string) error { - log.WithFields(log.Fields{ - "policyID": fts.PolicyID, - "package": packageName, - }).Trace("Checking if the policy shows the package added") - - maxTimeout := time.Minute - retryCount := 1 - - exp := e2e.GetExponentialBackOff(maxTimeout) - - integration, err := getIntegrationFromAgentPolicy(packageName, fts.PolicyID) - if err != nil { - return err - } - fts.Integration = integration - - configurationIsPresentFn := func() error { - defaultPolicy, err := getAgentDefaultPolicy() - if err != nil { - log.WithFields(log.Fields{ - "error": err, - "policyID": fts.PolicyID, - "retry": retryCount, - }).Warn("An error retrieving the policy happened") - - retryCount++ - - 
return err - } - - packagePolicies := defaultPolicy.Path("package_policies") - - for _, child := range packagePolicies.Children() { - id := child.Data().(string) - if id == fts.Integration.packageConfigID { - log.WithFields(log.Fields{ - "packageConfigID": fts.Integration.packageConfigID, - "policyID": fts.PolicyID, - }).Info("The integration was found in the policy") - return nil - } - } - - log.WithFields(log.Fields{ - "packageConfigID": fts.Integration.packageConfigID, - "policyID": fts.PolicyID, - "retry": retryCount, - }).Warn("The integration was not found in the policy") - - retryCount++ - - return err - } - - err = backoff.Retry(configurationIsPresentFn, exp) - if err != nil { - return err - } - - return nil -} - -func (fts *FleetTestSuite) theIntegrationIsOperatedInThePolicy(packageName string, action string) error { - log.WithFields(log.Fields{ - "action": action, - "policyID": fts.PolicyID, - "package": packageName, - }).Trace("Doing an operation for a package on a policy") - - if strings.ToLower(action) == actionADDED { - name, version, err := getIntegrationLatestVersion(packageName) - if err != nil { - return err - } - - integration, err := getIntegration(name, version) - if err != nil { - return err - } - fts.Integration = integration - - integrationPolicyID, err := addIntegrationToPolicy(fts.Integration, fts.PolicyID) - if err != nil { - return err - } - - fts.Integration.packageConfigID = integrationPolicyID - return nil - } else if strings.ToLower(action) == actionREMOVED { - integration, err := getIntegrationFromAgentPolicy(packageName, fts.PolicyID) - if err != nil { - return err - } - fts.Integration = integration - - err = deleteIntegrationFromPolicy(fts.Integration, fts.PolicyID) - if err != nil { - log.WithFields(log.Fields{ - "err": err, - "packageConfigID": fts.Integration.packageConfigID, - "policyID": fts.PolicyID, - }).Error("The integration could not be deleted from the policy") - return err - } - return nil - } - - return 
godog.ErrPending -} - -func (fts *FleetTestSuite) theHostNameIsNotShownInTheAdminViewInTheSecurityApp() error { - log.Trace("Checking if the hostname is not shown in the Administration view in the Security App") - - maxTimeout := time.Duration(timeoutFactor) * time.Minute - retryCount := 1 - - exp := e2e.GetExponentialBackOff(maxTimeout) - - agentListedInSecurityFn := func() error { - host, err := isAgentListedInSecurityApp(fts.Hostname) - if err != nil { - log.WithFields(log.Fields{ - "elapsedTime": exp.GetElapsedTime(), - "err": err, - "host": host, - "hostname": fts.Hostname, - "retry": retryCount, - }).Warn("We could not check the agent in the Administration view in the Security App yet") - - retryCount++ - - return err - } - - if host != nil { - log.WithFields(log.Fields{ - "elapsedTime": exp.GetElapsedTime(), - "host": host, - "hostname": fts.Hostname, - "retry": retryCount, - }).Warn("The host is still present in the Administration view in the Security App") - - retryCount++ - - return fmt.Errorf("The host %s is still present in the Administration view in the Security App", fts.Hostname) - } - - log.WithFields(log.Fields{ - "elapsedTime": exp.GetElapsedTime(), - "hostname": fts.Hostname, - "retries": retryCount, - }).Info("The Agent is not listed in the Administration view in the Security App") - return nil - } - - err := backoff.Retry(agentListedInSecurityFn, exp) - if err != nil { - return err - } - - return nil -} - -func (fts *FleetTestSuite) theHostNameIsShownInTheAdminViewInTheSecurityApp(status string) error { - log.Trace("Checking if the hostname is shown in the Admin view in the Security App") - - maxTimeout := time.Duration(timeoutFactor) * time.Minute - retryCount := 1 - - exp := e2e.GetExponentialBackOff(maxTimeout) - - agentListedInSecurityFn := func() error { - matches, err := isAgentListedInSecurityAppWithStatus(fts.Hostname, status) - if err != nil || !matches { - log.WithFields(log.Fields{ - "elapsedTime": exp.GetElapsedTime(), - 
"desiredStatus": status, - "err": err, - "hostname": fts.Hostname, - "matches": matches, - "retry": retryCount, - }).Warn("The agent is not listed in the Administration view in the Security App in the desired status yet") - - retryCount++ - - return err - } - - log.WithFields(log.Fields{ - "elapsedTime": exp.GetElapsedTime(), - "desiredStatus": status, - "hostname": fts.Hostname, - "matches": matches, - "retries": retryCount, - }).Info("The Agent is listed in the Administration view in the Security App in the desired status") - return nil - } - - err := backoff.Retry(agentListedInSecurityFn, exp) - if err != nil { - return err - } - - return nil -} - -func (fts *FleetTestSuite) anEndpointIsSuccessfullyDeployedWithAgentAndInstalller(image string, installer string) error { - err := fts.anAgentIsDeployedToFleetWithInstaller(image, installer) - if err != nil { - return err - } - - err = fts.theAgentIsListedInFleetWithStatus("online") - if err != nil { - return err - } - - // we use integration's title - return fts.theIntegrationIsOperatedInThePolicy(elasticEnpointIntegrationTitle, actionADDED) -} - -func (fts *FleetTestSuite) thePolicyResponseWillBeShownInTheSecurityApp() error { - agentID, err := getAgentID(fts.Hostname) - if err != nil { - return err - } - - maxTimeout := time.Duration(timeoutFactor) * time.Minute - retryCount := 1 - - exp := e2e.GetExponentialBackOff(maxTimeout) - - getEventsFn := func() error { - listed, err := isPolicyResponseListedInSecurityApp(agentID) - if err != nil { - log.WithFields(log.Fields{ - "elapsedTime": exp.GetElapsedTime(), - "err": err, - "retries": retryCount, - }).Warn("Could not get metadata from the Administration view in the Security App yet") - retryCount++ - - return err - } - - if !listed { - log.WithFields(log.Fields{ - "agentID": agentID, - "elapsedTime": exp.GetElapsedTime(), - "retries": retryCount, - }).Warn("The policy response is not listed as 'success' in the Administration view in the Security App yet") - 
retryCount++ - - return fmt.Errorf("The policy response is not listed as 'success' in the Administration view in the Security App yet") - } - - log.WithFields(log.Fields{ - "elapsedTime": exp.GetElapsedTime(), - "retries": retryCount, - }).Info("The policy response is listed as 'success' in the Administration view in the Security App") - return nil - } - - err = backoff.Retry(getEventsFn, exp) - if err != nil { - return err - } - - return nil -} - -func (fts *FleetTestSuite) thePolicyIsUpdatedToHaveMode(name string, mode string) error { - if name != "malware" { - log.WithFields(log.Fields{ - "name": name, - }).Warn("We only support 'malware' policy to be updated") - return godog.ErrPending - } - - if mode != "detect" && mode != "prevent" { - log.WithFields(log.Fields{ - "name": name, - "mode": mode, - }).Warn("We only support 'detect' and 'prevent' modes") - return godog.ErrPending - } - - integration, err := getIntegrationFromAgentPolicy(elasticEnpointIntegrationTitle, fts.PolicyID) - if err != nil { - return err - } - fts.Integration = integration - - integrationJSON := fts.Integration.json - - // prune fields not allowed in the API side - prunedFields := []string{ - "created_at", "created_by", "id", "revision", "updated_at", "updated_by", - } - for _, f := range prunedFields { - integrationJSON.Delete(f) - } - - // wee only support Windows and Mac, not Linux - integrationJSON.SetP(mode, "inputs.0.config.policy.value.windows."+name+".mode") - integrationJSON.SetP(mode, "inputs.0.config.policy.value.mac."+name+".mode") - - response, err := updateIntegrationPackageConfig(fts.Integration.packageConfigID, integrationJSON.String()) - if err != nil { - return err - } - - // we use a string because we are not able to process what comes in the event, so we will do - // an alphabetical order, as they share same layout but different millis and timezone format - updatedAt := response.Path("item.updated_at").Data().(string) - fts.PolicyUpdatedAt = updatedAt - return nil -} - 
-func (fts *FleetTestSuite) thePolicyWillReflectTheChangeInTheSecurityApp() error { - agentID, err := getAgentID(fts.Hostname) - if err != nil { - return err - } - - maxTimeout := time.Duration(timeoutFactor) * time.Minute * 2 - retryCount := 1 - - exp := e2e.GetExponentialBackOff(maxTimeout) - - getEventsFn := func() error { - err := getAgentEvents("endpoint-security", agentID, fts.Integration.packageConfigID, fts.PolicyUpdatedAt) - if err != nil { - log.WithFields(log.Fields{ - "elapsedTime": exp.GetElapsedTime(), - "err": err, - "retries": retryCount, - }).Warn("There are no events for the agent in Fleet") - retryCount++ - - return err - } - - log.WithFields(log.Fields{ - "elapsedTime": exp.GetElapsedTime(), - "retries": retryCount, - }).Info("There are events for the agent in Fleet") - return nil - } - - err = backoff.Retry(getEventsFn, exp) - if err != nil { - return err - } - - return nil -} - -// theVersionOfThePackageIsInstalled installs a package in a version -func (fts *FleetTestSuite) theVersionOfThePackageIsInstalled(version string, packageName string) error { - log.WithFields(log.Fields{ - "package": packageName, - "version": version, - }).Trace("Checking if package version is installed") - - name, version, err := getIntegrationLatestVersion(packageName) - if err != nil { - return err - } - - installedIntegration, err := installIntegrationAssets(name, version) - if err != nil { - return err - } - fts.Integration = installedIntegration - - return nil -} - -func (fts *FleetTestSuite) anAttemptToEnrollANewAgentFails() error { - log.Trace("Enrolling a new agent with an revoked token") - - installer := fts.getInstaller() - - profile := installer.profile // name of the runtime dependencies compose file - - containerName := fmt.Sprintf("%s_%s_%s_%d", profile, fts.Image+"-systemd", ElasticAgentServiceName, 2) // name of the new container - - err := deployAgentToFleet(installer, containerName, fts.CurrentToken) - // the installation process for TAR includes the 
enrollment - if installer.installerType != "tar" { - if err != nil { - return err - } - - err = installer.EnrollFn(fts.CurrentToken) - if err == nil { - err = fmt.Errorf("The agent was enrolled although the token was previously revoked") - - log.WithFields(log.Fields{ - "tokenID": fts.CurrentTokenID, - "error": err, - }).Error(err.Error()) - - return err - } - - log.WithFields(log.Fields{ - "err": err, - "token": fts.CurrentToken, - }).Debug("As expected, it's not possible to enroll an agent with a revoked token") - return nil - } - - // checking the error message produced by the install command in TAR installer - // to distinguish from other install errors - if err != nil && strings.HasPrefix(err.Error(), "Failed to install the agent with subcommand:") { - log.WithFields(log.Fields{ - "err": err, - "token": fts.CurrentToken, - }).Debug("As expected, it's not possible to enroll an agent with a revoked token") - return nil - } - - return err -} - -func (fts *FleetTestSuite) removeToken() error { - revokeTokenURL := fleetEnrollmentTokenURL + "/" + fts.CurrentTokenID - deleteReq := createDefaultHTTPRequest(revokeTokenURL) - - body, err := curl.Delete(deleteReq) - if err != nil { - log.WithFields(log.Fields{ - "tokenID": fts.CurrentTokenID, - "body": body, - "error": err, - "url": revokeTokenURL, - }).Error("Could not delete token") - return err - } - - log.WithFields(log.Fields{ - "tokenID": fts.CurrentTokenID, - }).Debug("The token was deleted") - - return nil -} - -// unenrollHostname deletes the statuses for an existing agent, filtering by hostname -func (fts *FleetTestSuite) unenrollHostname(force bool) error { - log.Tracef("Un-enrolling all agentIDs for %s", fts.Hostname) - - jsonParsed, err := getOnlineAgents(true) - if err != nil { - return err - } - - hosts := jsonParsed.Path("list").Children() - - for _, host := range hosts { - hostname := host.Path("local_metadata.host.hostname").Data().(string) - // a hostname has an agentID by status - if hostname == 
fts.Hostname { - agentID := host.Path("id").Data().(string) - log.WithFields(log.Fields{ - "hostname": fts.Hostname, - "agentID": agentID, - }).Debug("Un-enrolling agent in Fleet") - - err := unenrollAgent(agentID, force) - if err != nil { - return err - } - } - } - - return nil -} - -func (fts *FleetTestSuite) upgradeAgent(version string) error { - agentID, err := getAgentID(fts.Hostname) - if err != nil { - return err - } - - upgradeReq := curl.HTTPRequest{ - BasicAuthUser: "elastic", - BasicAuthPassword: "changeme", - Headers: map[string]string{ - "Content-Type": "application/json", - "kbn-xsrf": "true", - }, - URL: fmt.Sprintf(fleetAgentUpgradeURL, agentID), - Payload: `{"version":"` + version + `", "force": true}`, - } - - if content, err := curl.Post(upgradeReq); err != nil { - return errors.Wrap(err, content) - } - - return nil -} - -// checkFleetConfiguration checks that Fleet configuration is not missing -// any requirements and is read. To achieve it, a GET request is executed -func checkFleetConfiguration() error { - getReq := curl.HTTPRequest{ - BasicAuthUser: "elastic", - BasicAuthPassword: "changeme", - Headers: map[string]string{ - "Content-Type": "application/json", - "kbn-xsrf": "e2e-tests", - }, - URL: fleetSetupURL, - } - - log.Trace("Ensuring Fleet setup was initialised") - responseBody, err := curl.Get(getReq) - if err != nil { - log.WithFields(log.Fields{ - "responseBody": responseBody, - }).Error("Could not check Kibana setup for Fleet") - return err - } - - if !strings.Contains(responseBody, `"isReady":true,"missing_requirements":[]`) { - err = fmt.Errorf("Kibana has not been initialised: %s", responseBody) - log.Error(err.Error()) - return err - } - - log.WithFields(log.Fields{ - "responseBody": responseBody, - }).Info("Kibana setup initialised") - - return nil -} - -// createFleetConfiguration sends a POST request to Fleet forcing the -// recreation of the configuration -func createFleetConfiguration() error { - postReq := 
createDefaultHTTPRequest(fleetSetupURL) - postReq.Payload = `{ - "forceRecreate": true - }` - - body, err := curl.Post(postReq) - if err != nil { - log.WithFields(log.Fields{ - "body": body, - "error": err, - "url": fleetSetupURL, - }).Error("Could not initialise Fleet setup") - return err - } - - log.WithFields(log.Fields{ - "responseBody": body, - }).Info("Fleet setup done") - - return nil -} - -// createDefaultHTTPRequest Creates a default HTTP request, including the basic auth, -// JSON content type header, and a specific header that is required by Kibana -func createDefaultHTTPRequest(url string) curl.HTTPRequest { - return curl.HTTPRequest{ - BasicAuthUser: "elastic", - BasicAuthPassword: "changeme", - Headers: map[string]string{ - "Content-Type": "application/json", - "kbn-xsrf": "e2e-tests", - }, - URL: url, - } -} - -// createFleetToken sends a POST request to Fleet creating a new token with a name -func createFleetToken(name string, policyID string) (*gabs.Container, error) { - postReq := createDefaultHTTPRequest(fleetEnrollmentTokenURL) - postReq.Payload = `{ - "policy_id": "` + policyID + `", - "name": "` + name + `" - }` - - body, err := curl.Post(postReq) - if err != nil { - log.WithFields(log.Fields{ - "body": body, - "error": err, - "url": fleetSetupURL, - }).Error("Could not create Fleet token") - return nil, err - } - - jsonParsed, err := gabs.ParseJSON([]byte(body)) - if err != nil { - log.WithFields(log.Fields{ - "error": err, - "responseBody": body, - }).Error("Could not parse response into JSON") - return nil, err - } - - tokenItem := jsonParsed.Path("item") - - log.WithFields(log.Fields{ - "tokenId": tokenItem.Path("id").Data().(string), - "apiKeyId": tokenItem.Path("api_key_id").Data().(string), - }).Debug("Fleet token created") - - return tokenItem, nil -} - -func deployAgentToFleet(installer ElasticAgentInstaller, containerName string, token string) error { - profile := installer.profile // name of the runtime dependencies compose file - 
service := installer.service // name of the service - serviceTag := installer.tag // docker tag of the service - - envVarsPrefix := strings.ReplaceAll(service, "-", "_") - - // let's start with Centos 7 - profileEnv[envVarsPrefix+"Tag"] = serviceTag - // we are setting the container name because Centos service could be reused by any other test suite - profileEnv[envVarsPrefix+"ContainerName"] = containerName - // define paths where the binary will be mounted - profileEnv[envVarsPrefix+"AgentBinarySrcPath"] = installer.path - profileEnv[envVarsPrefix+"AgentBinaryTargetPath"] = "/" + installer.name - - serviceManager := services.NewServiceManager() - - err := serviceManager.AddServicesToCompose(profile, []string{service}, profileEnv) - if err != nil { - log.WithFields(log.Fields{ - "service": service, - "tag": serviceTag, - }).Error("Could not run the target box") - return err - } - - err = installer.PreInstallFn() - if err != nil { - return err - } - - err = installer.InstallFn(containerName, token) - if err != nil { - return err - } - - return installer.PostInstallFn() -} - -// getAgentDefaultPolicy sends a GET request to Fleet for the existing default policy -func getAgentDefaultPolicy() (*gabs.Container, error) { - r := createDefaultHTTPRequest(ingestManagerAgentPoliciesURL) - body, err := curl.Get(r) - if err != nil { - log.WithFields(log.Fields{ - "body": body, - "error": err, - "url": ingestManagerAgentPoliciesURL, - }).Error("Could not get Fleet's policies") - return nil, err - } - - jsonParsed, err := gabs.ParseJSON([]byte(body)) - if err != nil { - log.WithFields(log.Fields{ - "error": err, - "responseBody": body, - }).Error("Could not parse response into JSON") - return nil, err - } - - // data streams should contain array of elements - policies := jsonParsed.Path("items") - - log.WithFields(log.Fields{ - "count": len(policies.Children()), - }).Trace("Fleet policies retrieved") - - // TODO: perform a strong check to capture default policy - defaultPolicy 
:= policies.Index(0) - - return defaultPolicy, nil -} - -func getAgentEvents(applicationName string, agentID string, packagePolicyID string, updatedAt string) error { - url := fmt.Sprintf(fleetAgentEventsURL, agentID) - getReq := createDefaultHTTPRequest(url) - getReq.QueryString = "page=1&perPage=20" - - body, err := curl.Get(getReq) - if err != nil { - log.WithFields(log.Fields{ - "agentID": agentID, - "application": applicationName, - "body": body, - "error": err, - "packagePolicyID": packagePolicyID, - "url": url, - }).Error("Could not get agent events from Fleet") - return err - } - - jsonResponse, err := gabs.ParseJSON([]byte(body)) - if err != nil { - log.WithFields(log.Fields{ - "error": err, - "responseBody": body, - }).Error("Could not parse response into JSON") - return err - } - - listItems := jsonResponse.Path("list").Children() - for _, item := range listItems { - message := item.Path("message").Data().(string) - // we use a string because we are not able to process what comes in the event, so we will do - // an alphabetical order, as they share same layout but different millis and timezone format - timestamp := item.Path("timestamp").Data().(string) - - log.WithFields(log.Fields{ - "agentID": agentID, - "application": applicationName, - "event_at": timestamp, - "message": message, - "packagePolicyID": packagePolicyID, - "updated_at": updatedAt, - }).Trace("Event found") - - matches := (strings.Contains(message, applicationName) && - strings.Contains(message, "["+agentID+"]: State changed to") && - strings.Contains(message, "Protecting with policy {"+packagePolicyID+"}")) - - if matches && timestamp > updatedAt { - log.WithFields(log.Fields{ - "application": applicationName, - "event_at": timestamp, - "packagePolicyID": packagePolicyID, - "updated_at": updatedAt, - "message": message, - }).Info("Event after the update was found") - return nil - } - } - - return fmt.Errorf("No %s events where found for the agent in the %s policy", applicationName, 
packagePolicyID) -} - -// getAgentID sends a GET request to Fleet for a existing hostname -// This method will retrieve the only agent ID for a hostname in the online status -func getAgentID(agentHostname string) (string, error) { - log.Tracef("Retrieving agentID for %s", agentHostname) - - jsonParsed, err := getOnlineAgents(false) - if err != nil { - return "", err - } - - hosts := jsonParsed.Path("list").Children() - - for _, host := range hosts { - hostname := host.Path("local_metadata.host.hostname").Data().(string) - if hostname == agentHostname { - agentID := host.Path("id").Data().(string) - log.WithFields(log.Fields{ - "hostname": agentHostname, - "agentID": agentID, - }).Debug("Agent listed in Fleet with online status") - return agentID, nil - } - } - - return "", nil -} - -// getDataStreams sends a GET request to Fleet for the existing data-streams -// if called prior to any Agent being deployed it should return a list of -// zero data streams as: { "data_streams": [] }. If called after the Agent -// is running, it will return a list of (currently in 7.8) 20 streams -func getDataStreams() (*gabs.Container, error) { - r := createDefaultHTTPRequest(ingestManagerDataStreamsURL) - body, err := curl.Get(r) - if err != nil { - log.WithFields(log.Fields{ - "body": body, - "error": err, - "url": ingestManagerDataStreamsURL, - }).Error("Could not get Fleet's data streams for the agent") - return nil, err - } - - jsonParsed, err := gabs.ParseJSON([]byte(body)) - if err != nil { - log.WithFields(log.Fields{ - "error": err, - "responseBody": body, - }).Error("Could not parse response into JSON") - return nil, err - } - - // data streams should contain array of elements - dataStreams := jsonParsed.Path("data_streams") - - log.WithFields(log.Fields{ - "count": len(dataStreams.Children()), - }).Debug("Data Streams retrieved") - - return dataStreams, nil -} - -// getOnlineAgents sends a GET request to Fleet for the existing online agents -// Will return the JSON object 
representing the response of querying Fleet's Agents -// endpoint -func getOnlineAgents(showInactive bool) (*gabs.Container, error) { - r := createDefaultHTTPRequest(fleetAgentsURL) - // let's not URL encode the querystring, as it seems Kibana is not handling - // the request properly, returning an 400 Bad Request error with this message: - // [request query.page=1&perPage=20&showInactive=true]: definition for this key is missing - r.EncodeURL = false - r.QueryString = fmt.Sprintf("page=1&perPage=20&showInactive=%t", showInactive) - - body, err := curl.Get(r) - if err != nil { - log.WithFields(log.Fields{ - "body": body, - "error": err, - "url": r.GetURL(), - }).Error("Could not get Fleet's online agents") - return nil, err - } - - jsonResponse, err := gabs.ParseJSON([]byte(body)) - if err != nil { - log.WithFields(log.Fields{ - "error": err, - "responseBody": body, - }).Error("Could not parse response into JSON") - return nil, err - } - - return jsonResponse, nil -} - -// isAgentInStatus extracts the status for an agent, identified by its hostname -// It will query Fleet's agents endpoint -func isAgentInStatus(agentID string, desiredStatus string) (bool, error) { - r := createDefaultHTTPRequest(fleetAgentsURL + "/" + agentID) - body, err := curl.Get(r) - if err != nil { - log.WithFields(log.Fields{ - "body": body, - "error": err, - "url": r.GetURL(), - }).Error("Could not get agent in Fleet") - return false, err - } - - jsonResponse, err := gabs.ParseJSON([]byte(body)) - - agentStatus := jsonResponse.Path("item.status").Data().(string) - - return (strings.ToLower(agentStatus) == strings.ToLower(desiredStatus)), nil -} - -func unenrollAgent(agentID string, force bool) error { - unEnrollURL := fmt.Sprintf(fleetAgentsUnEnrollURL, agentID) - postReq := createDefaultHTTPRequest(unEnrollURL) - - if force { - postReq.Payload = `{ - "force": true - }` - } - - body, err := curl.Post(postReq) - if err != nil { - log.WithFields(log.Fields{ - "agentID": agentID, - "body": 
body, - "error": err, - "url": unEnrollURL, - }).Error("Could unenroll agent") - return err - } - - log.WithFields(log.Fields{ - "agentID": agentID, - }).Debug("Fleet agent was unenrolled") - - return nil -} diff --git a/e2e/_suites/fleet/ingest-manager_test.go b/e2e/_suites/fleet/ingest-manager_test.go deleted file mode 100644 index 511e9b4f53..0000000000 --- a/e2e/_suites/fleet/ingest-manager_test.go +++ /dev/null @@ -1,312 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package main - -import ( - "context" - "fmt" - "os" - "path" - "strings" - "time" - - "github.com/cucumber/godog" - "github.com/cucumber/messages-go/v10" - "github.com/elastic/e2e-testing/cli/config" - "github.com/elastic/e2e-testing/cli/docker" - "github.com/elastic/e2e-testing/cli/services" - "github.com/elastic/e2e-testing/cli/shell" - "github.com/elastic/e2e-testing/e2e" - log "github.com/sirupsen/logrus" -) - -// developerMode tears down the backend services (ES, Kibana, Package Registry) -// after a test suite. This is the desired behavior, but when developing, we maybe want to keep -// them running to speed up the development cycle. 
-// It can be overriden by the DEVELOPER_MODE env var -var developerMode = false - -// ElasticAgentProcessName the name of the process for the Elastic Agent -const ElasticAgentProcessName = "elastic-agent" - -// ElasticAgentServiceName the name of the service for the Elastic Agent -const ElasticAgentServiceName = "elastic-agent" - -// FleetProfileName the name of the profile to run the runtime, backend services -const FleetProfileName = "fleet" - -var agentVersionBase = "8.0.0-SNAPSHOT" - -// agentVersion is the version of the agent to use -// It can be overriden by ELASTIC_AGENT_VERSION env var -var agentVersion = agentVersionBase - -// agentStaleVersion is the version of the agent to use as a base during upgrade -// It can be overriden by ELASTIC_AGENT_STALE_VERSION env var. Using latest GA as a default. -var agentStaleVersion = "7.10.0" - -// stackVersion is the version of the stack to use -// It can be overriden by STACK_VERSION env var -var stackVersion = agentVersionBase - -// profileEnv is the environment to be applied to any execution -// affecting the runtime dependencies (or profile) -var profileEnv map[string]string - -// timeoutFactor a multiplier for the max timeout when doing backoff retries. 
-// It can be overriden by TIMEOUT_FACTOR env var -var timeoutFactor = 3 - -// All URLs running on localhost as Kibana is expected to be exposed there -const kibanaBaseURL = "http://localhost:5601" - -var kibanaClient *services.KibanaClient - -func init() { - config.Init() - - kibanaClient = services.NewKibanaClient() - - developerMode, _ = shell.GetEnvBool("DEVELOPER_MODE") - if developerMode { - log.Info("Running in Developer mode 💻: runtime dependencies between different test runs will be reused to speed up dev cycle") - } - - // check if base version is an alias - agentVersionBase = e2e.GetElasticArtifactVersion(agentVersionBase) - - timeoutFactor = shell.GetEnvInteger("TIMEOUT_FACTOR", timeoutFactor) - agentVersion = shell.GetEnv("ELASTIC_AGENT_VERSION", agentVersionBase) - agentStaleVersion = shell.GetEnv("ELASTIC_AGENT_STALE_VERSION", agentStaleVersion) - - // check if version is an alias - agentVersion = e2e.GetElasticArtifactVersion(agentVersion) - - stackVersion = shell.GetEnv("STACK_VERSION", stackVersion) -} - -func IngestManagerFeatureContext(s *godog.Suite) { - imts := IngestManagerTestSuite{ - Fleet: &FleetTestSuite{ - Installers: map[string]ElasticAgentInstaller{ - "centos-systemd": GetElasticAgentInstaller("centos", "systemd"), - "centos-tar": GetElasticAgentInstaller("centos", "tar"), - "debian-systemd": GetElasticAgentInstaller("debian", "systemd"), - "debian-tar": GetElasticAgentInstaller("debian", "tar"), - }, - }, - StandAlone: &StandAloneTestSuite{}, - } - serviceManager := services.NewServiceManager() - - s.Step(`^the "([^"]*)" process is in the "([^"]*)" state on the host$`, imts.processStateOnTheHost) - - imts.Fleet.contributeSteps(s) - imts.StandAlone.contributeSteps(s) - - s.BeforeSuite(func() { - log.Trace("Installing Fleet runtime dependencies") - - workDir, _ := os.Getwd() - profileEnv = map[string]string{ - "stackVersion": stackVersion, - "kibanaConfigPath": path.Join(workDir, "configurations", "kibana.config.yml"), - } - - profile 
:= FleetProfileName - err := serviceManager.RunCompose(true, []string{profile}, profileEnv) - if err != nil { - log.WithFields(log.Fields{ - "profile": profile, - }).Fatal("Could not run the runtime dependencies for the profile.") - } - - minutesToBeHealthy := time.Duration(timeoutFactor) * time.Minute - healthy, err := e2e.WaitForElasticsearch(minutesToBeHealthy) - if !healthy { - log.WithFields(log.Fields{ - "error": err, - "minutes": minutesToBeHealthy, - }).Fatal("The Elasticsearch cluster could not get the healthy status") - } - - healthyKibana, err := kibanaClient.WaitForKibana(minutesToBeHealthy) - if !healthyKibana { - log.WithFields(log.Fields{ - "error": err, - "minutes": minutesToBeHealthy, - }).Fatal("The Kibana instance could not get the healthy status") - } - - imts.Fleet.setup() - - imts.StandAlone.RuntimeDependenciesStartDate = time.Now().UTC() - }) - s.BeforeScenario(func(*messages.Pickle) { - log.Trace("Before Fleet scenario") - - imts.StandAlone.Cleanup = false - - imts.Fleet.beforeScenario() - }) - s.AfterSuite(func() { - if !developerMode { - log.Debug("Destroying Fleet runtime dependencies") - profile := FleetProfileName - - err := serviceManager.StopCompose(true, []string{profile}) - if err != nil { - log.WithFields(log.Fields{ - "error": err, - "profile": profile, - }).Warn("Could not destroy the runtime dependencies for the profile.") - } - } - - installers := imts.Fleet.Installers - for k, v := range installers { - agentPath := v.path - if _, err := os.Stat(agentPath); err == nil { - err = os.Remove(agentPath) - if err != nil { - log.WithFields(log.Fields{ - "err": err, - "installer": k, - "path": agentPath, - }).Warn("Elastic Agent binary could not be removed.") - } else { - log.WithFields(log.Fields{ - "installer": k, - "path": agentPath, - }).Debug("Elastic Agent binary was removed.") - } - } - } - }) - s.AfterScenario(func(*messages.Pickle, error) { - log.Trace("After Fleet scenario") - - if imts.StandAlone.Cleanup { - 
imts.StandAlone.afterScenario() - } - - if imts.Fleet.Cleanup { - imts.Fleet.afterScenario() - } - }) -} - -// IngestManagerTestSuite represents a test suite, holding references to the pieces needed to run the tests -type IngestManagerTestSuite struct { - Fleet *FleetTestSuite - StandAlone *StandAloneTestSuite -} - -func (imts *IngestManagerTestSuite) processStateOnTheHost(process string, state string) error { - profile := FleetProfileName - serviceName := ElasticAgentServiceName - - containerName := fmt.Sprintf("%s_%s_%s_%d", profile, imts.Fleet.Image+"-systemd", serviceName, 1) - if imts.StandAlone.Hostname != "" { - containerName = fmt.Sprintf("%s_%s_%d", profile, serviceName, 1) - } - - return checkProcessStateOnTheHost(containerName, process, state) -} - -// checkElasticAgentVersion returns a fallback version (agentVersionBase) if the version set by the environment is empty -func checkElasticAgentVersion(version string) string { - environmentVersion := os.Getenv("ELASTIC_AGENT_VERSION") - - if environmentVersion == "" { - return agentVersionBase - } - - if strings.HasPrefix(strings.ToLower(environmentVersion), "pr-") { - return agentVersionBase - } - - return version -} - -// name of the container for the service: -// we are using the Docker client instead of docker-compose -// because it does not support returning the output of a -// command: it simply returns error level -func checkProcessStateOnTheHost(containerName string, process string, state string) error { - timeout := time.Duration(timeoutFactor) * time.Minute - - err := e2e.WaitForProcess(containerName, process, state, timeout) - if err != nil { - if state == "started" { - log.WithFields(log.Fields{ - "container ": containerName, - "error": err, - "timeout": timeout, - }).Error("The process was not found but should be present") - } else { - log.WithFields(log.Fields{ - "container": containerName, - "error": err, - "timeout": timeout, - }).Error("The process was found but shouldn't be present") - } - 
- return err - } - - return nil -} - -func execCommandInService(profile string, image string, serviceName string, cmds []string, detach bool) error { - serviceManager := services.NewServiceManager() - - composes := []string{ - profile, // profile name - image, // image for the service - } - composeArgs := []string{"exec", "-T"} - if detach { - composeArgs = append(composeArgs, "-d") - } - composeArgs = append(composeArgs, serviceName) - composeArgs = append(composeArgs, cmds...) - - err := serviceManager.RunCommand(profile, composes, composeArgs, profileEnv) - if err != nil { - log.WithFields(log.Fields{ - "command": cmds, - "error": err, - "service": serviceName, - }).Error("Could not execute command in container") - - return err - } - - return nil -} - -// we need the container name because we use the Docker Client instead of Docker Compose -func getContainerHostname(containerName string) (string, error) { - log.WithFields(log.Fields{ - "containerName": containerName, - }).Trace("Retrieving container name from the Docker client") - - hostname, err := docker.ExecCommandIntoContainer(context.Background(), containerName, "root", []string{"cat", "/etc/hostname"}) - if err != nil { - log.WithFields(log.Fields{ - "containerName": containerName, - "error": err, - }).Error("Could not retrieve container name from the Docker client") - return "", err - } - - log.WithFields(log.Fields{ - "containerName": containerName, - "hostname": hostname, - }).Info("Hostname retrieved from the Docker client") - - return hostname, nil -} diff --git a/e2e/_suites/fleet/integrations.go b/e2e/_suites/fleet/integrations.go deleted file mode 100644 index 2342bb4698..0000000000 --- a/e2e/_suites/fleet/integrations.go +++ /dev/null @@ -1,344 +0,0 @@ -package main - -import ( - "fmt" - "strings" - - "github.com/Jeffail/gabs/v2" - log "github.com/sirupsen/logrus" -) - -// title for the Elastic Endpoint integration in the package registry. 
-// This value could change depending on the version of the package registry -// We are using the title because the feature files have to be super readable -// and the title is more readable than the name -const elasticEnpointIntegrationTitle = "Endpoint Security" - -// IntegrationPackage used to share information about a integration -type IntegrationPackage struct { - packageConfigID string `json:"packageConfigId"` - name string `json:"name"` - title string `json:"title"` - version string `json:"version"` - json *gabs.Container // json representation of the integration -} - -// addIntegrationToPolicy sends a POST request to Fleet adding an integration to a configuration -func addIntegrationToPolicy(integrationPackage IntegrationPackage, policyID string) (string, error) { - name := integrationPackage.name + "-test-name" - description := integrationPackage.title + "-test-description" - - body, err := kibanaClient.AddIntegrationToPolicy(integrationPackage.name, name, integrationPackage.title, description, integrationPackage.version, policyID) - if err != nil { - return "", err - } - - jsonParsed, err := gabs.ParseJSON([]byte(body)) - if err != nil { - log.WithFields(log.Fields{ - "error": err, - "responseBody": body, - }).Error("Could not parse response into JSON") - return "", err - } - - integrationConfigurationID := jsonParsed.Path("item.id").Data().(string) - - log.WithFields(log.Fields{ - "policyID": policyID, - "integrationConfigurationID": integrationConfigurationID, - "integration": integrationPackage.name, - "version": integrationPackage.version, - }).Info("Integration added to the configuration") - - return integrationConfigurationID, nil -} - -// deleteIntegrationFromPolicy sends a POST request to Fleet deleting an integration from a configuration -func deleteIntegrationFromPolicy(integrationPackage IntegrationPackage, policyID string) error { - _, err := kibanaClient.DeleteIntegrationFromPolicy(integrationPackage.packageConfigID) - if err != nil { - 
return err - } - - log.WithFields(log.Fields{ - "policyID": policyID, - "integration": integrationPackage.name, - "packageConfigId": integrationPackage.packageConfigID, - "version": integrationPackage.version, - }).Info("Integration deleted from the configuration") - - return nil -} - -// getIntegration returns metadata from an integration from Fleet, without the package ID -func getIntegration(packageName string, version string) (IntegrationPackage, error) { - body, err := kibanaClient.GetIntegration(packageName, version) - if err != nil { - return IntegrationPackage{}, err - } - - jsonParsed, err := gabs.ParseJSON([]byte(body)) - if err != nil { - log.WithFields(log.Fields{ - "error": err, - "responseBody": body, - }).Error("Could not parse get response into JSON") - return IntegrationPackage{}, err - } - - response := jsonParsed.Path("response") - integrationPackage := IntegrationPackage{ - name: response.Path("name").Data().(string), - title: response.Path("title").Data().(string), - version: response.Path("latestVersion").Data().(string), - } - - return integrationPackage, nil -} - -// getIntegrationFromAgentPolicy inspects the integrations added to an agent policy, returning the -// a struct representing the package, including the packageID for the integration in the policy -func getIntegrationFromAgentPolicy(packageName string, agentPolicyID string) (IntegrationPackage, error) { - body, err := kibanaClient.GetIntegrationFromAgentPolicy(agentPolicyID) - if err != nil { - return IntegrationPackage{}, err - } - - jsonParsed, err := gabs.ParseJSON([]byte(body)) - if err != nil { - log.WithFields(log.Fields{ - "error": err, - "responseBody": body, - }).Error("Could not parse response into JSON") - return IntegrationPackage{}, err - } - - packagePolicies := jsonParsed.Path("item.package_policies").Children() - for _, packagePolicy := range packagePolicies { - title := packagePolicy.Path("package.title").Data().(string) - if title == packageName { - 
integrationPackage := IntegrationPackage{ - packageConfigID: packagePolicy.Path("id").Data().(string), - name: packagePolicy.Path("package.name").Data().(string), - title: title, - version: packagePolicy.Path("package.version").Data().(string), - json: packagePolicy, - } - - log.WithFields(log.Fields{ - "package": integrationPackage, - "policyID": agentPolicyID, - }).Debug("Package policy found in the configuration") - - return integrationPackage, nil - } - } - - return IntegrationPackage{}, fmt.Errorf("%s package policy not found in the configuration", packageName) -} - -// getIntegrationLatestVersion sends a GET request to Fleet for the existing integrations -// checking if the desired integration exists in the package registry. If so, it will -// return name and version (latest) of the integration -func getIntegrationLatestVersion(integrationName string) (string, string, error) { - body, err := kibanaClient.GetIntegrations() - if err != nil { - return "", "", err - } - - jsonParsed, err := gabs.ParseJSON([]byte(body)) - if err != nil { - log.WithFields(log.Fields{ - "error": err, - "responseBody": body, - }).Error("Could not parse response into JSON") - return "", "", err - } - - // data streams should contain array of elements - integrations := jsonParsed.Path("response").Children() - - log.WithFields(log.Fields{ - "count": len(integrations), - }).Trace("Integrations retrieved") - - for _, integration := range integrations { - title := integration.Path("title").Data().(string) - if strings.ToLower(title) == strings.ToLower(integrationName) { - name := integration.Path("name").Data().(string) - version := integration.Path("version").Data().(string) - log.WithFields(log.Fields{ - "name": name, - "title": title, - "version": version, - }).Debug("Integration in latest version found") - return name, version, nil - } - } - - return "", "", fmt.Errorf("The %s integration was not found", integrationName) -} - -// getMetadataFromSecurityApp sends a POST request to 
Endpoint retrieving the metadata that -// is listed in the Security App -func getMetadataFromSecurityApp() (*gabs.Container, error) { - body, err := kibanaClient.GetMetadataFromSecurityApp() - if err != nil { - return nil, err - } - - jsonParsed, err := gabs.ParseJSON([]byte(body)) - if err != nil { - log.WithFields(log.Fields{ - "error": err, - "responseBody": body, - }).Error("Could not parse response into JSON") - return nil, err - } - - hosts := jsonParsed.Path("hosts") - - log.WithFields(log.Fields{ - "hosts": hosts, - }).Trace("Hosts in the Security App") - - return hosts, nil -} - -// installIntegration sends a POST request to Fleet installing the assets for an integration -func installIntegrationAssets(integration string, version string) (IntegrationPackage, error) { - body, err := kibanaClient.InstallIntegrationAssets(integration, version) - if err != nil { - return IntegrationPackage{}, err - } - - log.WithFields(log.Fields{ - "integration": integration, - "version": version, - }).Info("Assets for the integration where installed") - - jsonParsed, err := gabs.ParseJSON([]byte(body)) - if err != nil { - log.WithFields(log.Fields{ - "error": err, - "responseBody": body, - }).Error("Could not parse install response into JSON") - return IntegrationPackage{}, err - } - response := jsonParsed.Path("response").Index(0) - - packageConfigID := response.Path("id").Data().(string) - - // get the integration again in the case it's already installed - integrationPackage, err := getIntegration(integration, version) - if err != nil { - return IntegrationPackage{}, err - } - - integrationPackage.packageConfigID = packageConfigID - - return integrationPackage, nil -} - -// isAgentListedInSecurityApp retrieves the hosts from Endpoint to check if a hostname -// is listed in the Security App. For that, we will inspect the metadata, and will iterate -// through the hosts, until we get the proper hostname. 
-func isAgentListedInSecurityApp(hostName string) (*gabs.Container, error) { - hosts, err := getMetadataFromSecurityApp() - if err != nil { - return nil, err - } - - for _, host := range hosts.Children() { - metadataHostname := host.Path("metadata.host.hostname").Data().(string) - if metadataHostname == hostName { - log.WithFields(log.Fields{ - "hostname": hostName, - }).Debug("Hostname for the agent listed in the Security App") - - return host, nil - } - } - - return nil, nil -} - -// isAgentListedInSecurityAppWithStatus inspects the metadata field for a hostname, obtained from -// the security App. We will check if the status matches the desired status, returning an error -// if the agent is not present in the Security App -func isAgentListedInSecurityAppWithStatus(hostName string, desiredStatus string) (bool, error) { - host, err := isAgentListedInSecurityApp(hostName) - if err != nil { - log.WithFields(log.Fields{ - "hostname": hostName, - "error": err, - }).Error("There was an error getting the agent in the Administration view in the Security app") - return false, err - } - - if host == nil { - return false, fmt.Errorf("The host %s is not listed in the Administration view in the Security App", hostName) - } - - hostStatus := host.Path("host_status").Data().(string) - log.WithFields(log.Fields{ - "desiredStatus": desiredStatus, - "hostname": hostName, - "status": hostStatus, - }).Debug("Hostname for the agent listed with desired status in the Administration view in the Security App") - - return (hostStatus == desiredStatus), nil -} - -// isPolicyResponseListedInSecurityApp sends a POST request to Endpoint to check if a hostname -// is listed in the Security App. For that, we will inspect the metadata, and will iterate -// through the hosts, until we get the policy status, finally checking for the success -// status. 
-func isPolicyResponseListedInSecurityApp(agentID string) (bool, error) { - hosts, err := getMetadataFromSecurityApp() - if err != nil { - return false, err - } - - for _, host := range hosts.Children() { - metadataAgentID := host.Path("metadata.elastic.agent.id").Data().(string) - name := host.Path("metadata.Endpoint.policy.applied.name").Data().(string) - status := host.Path("metadata.Endpoint.policy.applied.status").Data().(string) - if metadataAgentID == agentID { - log.WithFields(log.Fields{ - "agentID": agentID, - "name": name, - "status": status, - }).Debug("Policy response for the agent listed in the Security App") - - return (status == "success"), nil - } - } - - return false, nil -} - -// updateIntegrationPackageConfig sends a PUT request to Fleet updating integration -// configuration -func updateIntegrationPackageConfig(packageConfigID string, payload string) (*gabs.Container, error) { - body, err := kibanaClient.UpdateIntegrationPackageConfig(packageConfigID, payload) - if err != nil { - return nil, err - } - - jsonParsed, err := gabs.ParseJSON([]byte(body)) - if err != nil { - log.WithFields(log.Fields{ - "error": err, - "responseBody": body, - }).Error("Could not parse response into JSON") - return nil, err - } - - log.WithFields(log.Fields{ - "policyID": packageConfigID, - }).Debug("Configuration for the integration was updated") - - return jsonParsed, nil -} diff --git a/e2e/_suites/fleet/services.go b/e2e/_suites/fleet/services.go deleted file mode 100644 index 122f7e588a..0000000000 --- a/e2e/_suites/fleet/services.go +++ /dev/null @@ -1,626 +0,0 @@ -package main - -import ( - "context" - "fmt" - "os" - "strings" - "time" - - "github.com/elastic/e2e-testing/cli/docker" - "github.com/elastic/e2e-testing/cli/shell" - "github.com/elastic/e2e-testing/e2e" - log "github.com/sirupsen/logrus" -) - -// to avoid downloading the same artifacts, we are adding this map to cache the URL of the downloaded binaries, using as key 
-// the URL of the artifact. If another installer is trying to download the same URL, it will return the location of the -// already downloaded artifact. -var binariesCache = map[string]string{} - -// ElasticAgentInstaller represents how to install an agent, depending of the box type -type ElasticAgentInstaller struct { - artifactArch string // architecture of the artifact - artifactExtension string // extension of the artifact - artifactName string // name of the artifact - artifactOS string // OS of the artifact - artifactVersion string // version of the artifact - binDir string // location of the binary - commitFile string // elastic agent commit file - EnrollFn func(token string) error - homeDir string // elastic agent home dir - image string // docker image - installerType string - InstallFn func(containerName string, token string) error - InstallCertsFn func() error - logFile string // the name of the log file - logsDir string // location of the logs - name string // the name for the binary - path string // the local path where the agent for the binary is located - processName string // name of the elastic-agent process - profile string // parent docker-compose file - PostInstallFn func() error - PreInstallFn func() error - service string // name of the service - tag string // docker tag - UninstallFn func() error - workingDir string // location of the application -} - -// listElasticAgentWorkingDirContent list Elastic Agent's working dir content -func (i *ElasticAgentInstaller) listElasticAgentWorkingDirContent(containerName string) (string, error) { - cmd := []string{ - "ls", "-l", i.workingDir, - } - - content, err := docker.ExecCommandIntoContainer(context.Background(), containerName, "root", cmd) - if err != nil { - return "", err - } - - log.WithFields(log.Fields{ - "workingDir": i.workingDir, - "containerName": containerName, - "content": content, - }).Debug("Agent working dir content") - - return content, nil -} - -// getElasticAgentHash uses Elastic 
Agent's home dir to read the file with agent's build hash -// it will return the first six characters of the hash (short hash) -func (i *ElasticAgentInstaller) getElasticAgentHash(containerName string) (string, error) { - commitFile := i.homeDir + i.commitFile - - return getElasticAgentHash(containerName, commitFile) -} - -func getElasticAgentHash(containerName string, commitFile string) (string, error) { - cmd := []string{ - "cat", commitFile, - } - - fullHash, err := docker.ExecCommandIntoContainer(context.Background(), containerName, "root", cmd) - if err != nil { - return "", err - } - - runes := []rune(fullHash) - shortHash := string(runes[0:6]) - - log.WithFields(log.Fields{ - "commitFile": commitFile, - "containerName": containerName, - "hash": fullHash, - "shortHash": shortHash, - }).Debug("Agent build hash found") - - return shortHash, nil -} - -// getElasticAgentLogs uses elastic-agent log dir to read the entire log file -func (i *ElasticAgentInstaller) getElasticAgentLogs(hostname string) error { - containerName := hostname // name of the container, which matches the hostname - - hash, err := i.getElasticAgentHash(containerName) - if err != nil { - log.WithFields(log.Fields{ - "containerName": containerName, - "error": err, - }).Error("Could not get agent hash in the container") - - return err - } - - logFile := i.logsDir + i.logFile - if strings.Contains(logFile, "%s") { - logFile = fmt.Sprintf(logFile, hash) - } - cmd := []string{ - "cat", logFile, - } - - err = execCommandInService(i.profile, i.image, i.service, cmd, false) - if err != nil { - log.WithFields(log.Fields{ - "containerName": containerName, - "command": cmd, - "error": err, - "hash": hash, - }).Error("Could not get agent logs in the container") - - return err - } - - return nil -} - -// runElasticAgentCommand runs a command for the elastic-agent -func runElasticAgentCommand(profile string, image string, service string, process string, command string, arguments []string) error { - cmds := 
[]string{ - process, command, - } - cmds = append(cmds, arguments...) - - err := execCommandInService(profile, image, service, cmds, false) - if err != nil { - log.WithFields(log.Fields{ - "command": cmds, - "profile": profile, - "service": service, - "error": err, - }).Error("Could not run agent command in the box") - - return err - } - - return nil -} - -// downloadAgentBinary it downloads the binary and stores the location of the downloaded file -// into the installer struct, to be used else where -// If the environment variable ELASTIC_AGENT_DOWNLOAD_URL exists, then the artifact to be downloaded will -// be defined by that value -// Else, if the environment variable BEATS_USE_CI_SNAPSHOTS is set, then the artifact -// to be downloaded will be defined by the latest snapshot produced by the Beats CI. -func downloadAgentBinary(artifact string, version string, OS string, arch string, extension string) (string, string, error) { - fileName := fmt.Sprintf("%s-%s-%s.%s", artifact, version, arch, extension) - - handleDownload := func(URL string, fileName string) (string, string, error) { - if val, ok := binariesCache[URL]; ok { - log.WithFields(log.Fields{ - "URL": URL, - "path": val, - }).Debug("Retrieving binary from local cache") - return fileName, val, nil - } - - filePath, err := e2e.DownloadFile(URL) - if err != nil { - return fileName, filePath, err - } - - binariesCache[URL] = filePath - - return fileName, filePath, nil - } - - if downloadURL, exists := os.LookupEnv("ELASTIC_AGENT_DOWNLOAD_URL"); exists { - return handleDownload(downloadURL, fileName) - } - - var downloadURL string - var err error - - useCISnapshots, _ := shell.GetEnvBool("BEATS_USE_CI_SNAPSHOTS") - if useCISnapshots { - log.Debug("Using CI snapshots for the Elastic Agent") - - // We will use the snapshots produced by Beats CI - bucket := "beats-ci-artifacts" - object := fmt.Sprintf("snapshots/%s", fileName) - - // we are setting a version from a pull request: the version of the artifact will 
be kept as the base one - // i.e. /pull-requests/pr-21100/elastic-agent/elastic-agent-8.0.0-SNAPSHOT-x86_64.rpm - // i.e. /pull-requests/pr-21100/elastic-agent/elastic-agent-8.0.0-SNAPSHOT-amd64.deb - // i.e. /pull-requests/pr-21100/elastic-agent/elastic-agent-8.0.0-SNAPSHOT-linux-x86_64.tar.gz - if strings.HasPrefix(strings.ToLower(version), "pr-") { - fileName = fmt.Sprintf("%s-%s-%s.%s", artifact, agentVersionBase, arch, extension) - if extension == "tar.gz" { - fileName = fmt.Sprintf("%s-%s-%s-%s.%s", artifact, agentVersionBase, OS, arch, extension) - } - log.WithFields(log.Fields{ - "agentVersion": agentVersionBase, - "PR": version, - }).Debug("Using CI snapshots for a pull request") - object = fmt.Sprintf("pull-requests/%s/%s/%s", version, artifact, fileName) - } - - maxTimeout := time.Duration(timeoutFactor) * time.Minute - - downloadURL, err = e2e.GetObjectURLFromBucket(bucket, object, maxTimeout) - if err != nil { - return "", "", err - } - - return handleDownload(downloadURL, fileName) - } - - downloadURL, err = e2e.GetElasticArtifactURL(artifact, checkElasticAgentVersion(version), OS, arch, extension) - if err != nil { - return "", "", err - } - - return handleDownload(downloadURL, fileName) -} - -// GetElasticAgentInstaller returns an installer from a docker image -func GetElasticAgentInstaller(image string, installerType string) ElasticAgentInstaller { - log.WithFields(log.Fields{ - "image": image, - "installer": installerType, - }).Debug("Configuring installer for the agent") - - var installer ElasticAgentInstaller - var err error - if "centos" == image && "tar" == installerType { - installer, err = newTarInstaller("centos", "latest") - } else if "centos" == image && "systemd" == installerType { - installer, err = newCentosInstaller("centos", "latest") - } else if "debian" == image && "tar" == installerType { - installer, err = newTarInstaller("debian", "stretch") - } else if "debian" == image && "systemd" == installerType { - installer, err = 
newDebianInstaller("debian", "stretch") - } else { - log.WithFields(log.Fields{ - "image": image, - "installer": installerType, - }).Fatal("Sorry, we currently do not support this installer") - return ElasticAgentInstaller{} - } - - if err != nil { - log.WithFields(log.Fields{ - "error": err, - "image": image, - "installer": installerType, - }).Fatal("Sorry, we could not download the installer") - } - return installer -} - -func isSystemdBased(image string) bool { - return strings.HasSuffix(image, "-systemd") -} - -// newCentosInstaller returns an instance of the Centos installer -func newCentosInstaller(image string, tag string) (ElasticAgentInstaller, error) { - image = image + "-systemd" // we want to consume systemd boxes - service := image - profile := FleetProfileName - - // extract the agent in the box, as it's mounted as a volume - artifact := "elastic-agent" - version := agentVersion - os := "linux" - arch := "x86_64" - extension := "rpm" - - binaryName, binaryPath, err := downloadAgentBinary(artifact, version, os, arch, extension) - if err != nil { - log.WithFields(log.Fields{ - "artifact": artifact, - "version": version, - "os": os, - "arch": arch, - "extension": extension, - "error": err, - }).Error("Could not download the binary for the agent") - return ElasticAgentInstaller{}, err - } - - preInstallFn := func() error { - log.Trace("No preinstall commands for Centos + systemd") - return nil - } - installFn := func(containerName string, token string) error { - cmds := []string{"yum", "localinstall", "/" + binaryName, "-y"} - return extractPackage(profile, image, service, cmds) - } - enrollFn := func(token string) error { - args := []string{"http://kibana:5601", token, "-f", "--insecure"} - - return runElasticAgentCommand(profile, image, service, ElasticAgentProcessName, "enroll", args) - } - postInstallFn := func() error { - err = systemctlRun(profile, image, service, "enable") - if err != nil { - return err - } - return systemctlRun(profile, image, 
service, "start") - } - unInstallFn := func() error { - log.Trace("No uninstall commands for Centos + systemd") - return nil - } - installCertsFn := func() error { - if err := execCommandInService(profile, image, service, []string{"yum", "check-update"}, false); err != nil { - return err - } - if err := execCommandInService(profile, image, service, []string{"yum", "install", "ca-certificates", "-y"}, false); err != nil { - return err - } - if err := execCommandInService(profile, image, service, []string{"update-ca-trust", "force-enable"}, false); err != nil { - return err - } - if err := execCommandInService(profile, image, service, []string{"update-ca-trust", "extract"}, false); err != nil { - return err - } - - return nil - } - - binDir := "/var/lib/elastic-agent/data/elastic-agent-%s/" - - return ElasticAgentInstaller{ - artifactArch: arch, - artifactExtension: extension, - artifactName: artifact, - artifactOS: os, - artifactVersion: version, - binDir: binDir, - commitFile: ".elastic-agent.active.commit", - EnrollFn: enrollFn, - homeDir: "/etc/elastic-agent/", - image: image, - InstallFn: installFn, - InstallCertsFn: installCertsFn, - installerType: "rpm", - logFile: "elastic-agent-json.log", - logsDir: binDir + "logs/", - name: binaryName, - path: binaryPath, - PostInstallFn: postInstallFn, - PreInstallFn: preInstallFn, - processName: ElasticAgentProcessName, - profile: profile, - service: service, - tag: tag, - UninstallFn: unInstallFn, - workingDir: "/var/lib/elastic-agent", - }, nil -} - -// newDebianInstaller returns an instance of the Debian installer -func newDebianInstaller(image string, tag string) (ElasticAgentInstaller, error) { - image = image + "-systemd" // we want to consume systemd boxes - service := image - profile := FleetProfileName - - // extract the agent in the box, as it's mounted as a volume - artifact := "elastic-agent" - version := agentVersion - os := "linux" - arch := "amd64" - extension := "deb" - - binaryName, binaryPath, err := 
downloadAgentBinary(artifact, version, os, arch, extension) - if err != nil { - log.WithFields(log.Fields{ - "artifact": artifact, - "version": version, - "os": os, - "arch": arch, - "extension": extension, - "error": err, - }).Error("Could not download the binary for the agent") - return ElasticAgentInstaller{}, err - } - - preInstallFn := func() error { - log.Trace("No preinstall commands for Debian + systemd") - return nil - } - installFn := func(containerName string, token string) error { - cmds := []string{"apt", "install", "/" + binaryName, "-y"} - return extractPackage(profile, image, service, cmds) - } - enrollFn := func(token string) error { - args := []string{"http://kibana:5601", token, "-f", "--insecure"} - - return runElasticAgentCommand(profile, image, service, ElasticAgentProcessName, "enroll", args) - } - postInstallFn := func() error { - err = systemctlRun(profile, image, service, "enable") - if err != nil { - return err - } - return systemctlRun(profile, image, service, "start") - } - unInstallFn := func() error { - log.Trace("No uninstall commands for Debian + systemd") - return nil - } - installCertsFn := func() error { - if err := execCommandInService(profile, image, service, []string{"apt-get", "update"}, false); err != nil { - return err - } - if err := execCommandInService(profile, image, service, []string{"apt", "install", "ca-certificates", "-y"}, false); err != nil { - return err - } - if err := execCommandInService(profile, image, service, []string{"update-ca-certificates"}, false); err != nil { - return err - } - return nil - } - - binDir := "/var/lib/elastic-agent/data/elastic-agent-%s/" - - return ElasticAgentInstaller{ - artifactArch: arch, - artifactExtension: extension, - artifactName: artifact, - artifactOS: os, - artifactVersion: version, - binDir: binDir, - commitFile: ".elastic-agent.active.commit", - EnrollFn: enrollFn, - homeDir: "/etc/elastic-agent/", - image: image, - InstallFn: installFn, - InstallCertsFn: installCertsFn, 
- installerType: "deb", - logFile: "elastic-agent-json.log", - logsDir: binDir + "logs/", - name: binaryName, - path: binaryPath, - PostInstallFn: postInstallFn, - PreInstallFn: preInstallFn, - processName: ElasticAgentProcessName, - profile: profile, - service: service, - tag: tag, - UninstallFn: unInstallFn, - workingDir: "/var/lib/elastic-agent", - }, nil -} - -// newTarInstaller returns an instance of the Debian installer -func newTarInstaller(image string, tag string) (ElasticAgentInstaller, error) { - image = image + "-systemd" // we want to consume systemd boxes - service := image - profile := FleetProfileName - - // extract the agent in the box, as it's mounted as a volume - artifact := "elastic-agent" - version := agentVersion - os := "linux" - arch := "x86_64" - extension := "tar.gz" - - tarFile, binaryPath, err := downloadAgentBinary(artifact, version, os, arch, extension) - if err != nil { - log.WithFields(log.Fields{ - "artifact": artifact, - "version": version, - "os": os, - "arch": arch, - "extension": extension, - "error": err, - }).Error("Could not download the binary for the agent") - return ElasticAgentInstaller{}, err - } - - commitFile := ".elastic-agent.active.commit" - homeDir := "/elastic-agent/" - binDir := "/usr/bin/" - - preInstallFn := func() error { - commitFile := homeDir + commitFile - return installFromTar(profile, image, service, tarFile, commitFile, artifact, checkElasticAgentVersion(version), os, arch) - } - installFn := func(containerName string, token string) error { - // install the elastic-agent to /usr/bin/elastic-agent using command - binary := fmt.Sprintf("/elastic-agent/%s", artifact) - args := []string{"--force", "--insecure", "--enrollment-token", token, "--kibana-url", "http://kibana:5601"} - - err = runElasticAgentCommand(profile, image, service, binary, "install", args) - if err != nil { - return fmt.Errorf("Failed to install the agent with subcommand: %v", err) - } - return nil - } - enrollFn := func(token string) 
error { - args := []string{"http://kibana:5601", token, "-f", "--insecure"} - - return runElasticAgentCommand(profile, image, service, ElasticAgentProcessName, "enroll", args) - } - postInstallFn := func() error { - log.Trace("No postinstall commands for TAR installer") - return nil - } - unInstallFn := func() error { - args := []string{"-f"} - - return runElasticAgentCommand(profile, image, service, ElasticAgentProcessName, "uninstall", args) - } - installCertsFn := func() error { - if err := execCommandInService(profile, image, service, []string{"apt-get", "update"}, false); err != nil { - return err - } - if err := execCommandInService(profile, image, service, []string{"apt", "install", "ca-certificates", "-y"}, false); err != nil { - return err - } - if err := execCommandInService(profile, image, service, []string{"update-ca-certificates"}, false); err != nil { - return err - } - return nil - } - - return ElasticAgentInstaller{ - artifactArch: arch, - artifactExtension: extension, - artifactName: artifact, - artifactOS: os, - artifactVersion: version, - binDir: binDir, - commitFile: commitFile, - EnrollFn: enrollFn, - homeDir: homeDir, - image: image, - InstallFn: installFn, - InstallCertsFn: installCertsFn, - installerType: "tar", - logFile: "elastic-agent.log", - logsDir: "/opt/Elastic/Agent/", - name: tarFile, - path: binaryPath, - PostInstallFn: postInstallFn, - PreInstallFn: preInstallFn, - processName: ElasticAgentProcessName, - profile: profile, - service: service, - tag: tag, - UninstallFn: unInstallFn, - workingDir: "/opt/Elastic/Agent/", - }, nil -} - -func extractPackage(profile string, image string, service string, cmds []string) error { - err := execCommandInService(profile, image, service, cmds, false) - if err != nil { - log.WithFields(log.Fields{ - "command": cmds, - "error": err, - "image": image, - "service": service, - }).Error("Could not extract agent package in the box") - - return err - } - - return nil -} - -func installFromTar(profile 
string, image string, service string, tarFile string, commitFile string, artifact string, version string, OS string, arch string) error { - err := extractPackage(profile, image, service, []string{"tar", "-xvf", "/" + tarFile}) - if err != nil { - return err - } - - // simplify layout - cmds := []string{"mv", fmt.Sprintf("/%s-%s-%s-%s", artifact, version, OS, arch), "/elastic-agent"} - err = execCommandInService(profile, image, service, cmds, false) - if err != nil { - log.WithFields(log.Fields{ - "command": cmds, - "error": err, - "image": image, - "service": service, - }).Error("Could not extract agent package in the box") - - return err - } - - return nil -} - -func systemctlRun(profile string, image string, service string, command string) error { - cmd := []string{"systemctl", command, ElasticAgentProcessName} - err := execCommandInService(profile, image, service, cmd, false) - if err != nil { - log.WithFields(log.Fields{ - "command": cmd, - "error": err, - "service": service, - }).Errorf("Could not %s the service", command) - - return err - } - - log.WithFields(log.Fields{ - "command": cmd, - "service": service, - }).Trace("Systemctl executed") - return nil -} diff --git a/e2e/_suites/fleet/stand-alone.go b/e2e/_suites/fleet/stand-alone.go deleted file mode 100644 index c5a5198050..0000000000 --- a/e2e/_suites/fleet/stand-alone.go +++ /dev/null @@ -1,298 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. 
- -package main - -import ( - "context" - "fmt" - "os" - "strings" - "time" - - "github.com/cucumber/godog" - "github.com/elastic/e2e-testing/cli/docker" - "github.com/elastic/e2e-testing/cli/services" - "github.com/elastic/e2e-testing/e2e" - log "github.com/sirupsen/logrus" -) - -// StandAloneTestSuite represents the scenarios for Stand-alone-mode -type StandAloneTestSuite struct { - AgentConfigFilePath string - Cleanup bool - Hostname string - Image string - // date controls for queries - AgentStoppedDate time.Time - RuntimeDependenciesStartDate time.Time -} - -// afterScenario destroys the state created by a scenario -func (sats *StandAloneTestSuite) afterScenario() { - serviceManager := services.NewServiceManager() - serviceName := ElasticAgentServiceName - - if log.IsLevelEnabled(log.DebugLevel) { - _ = sats.getContainerLogs() - } - - if !developerMode { - _ = serviceManager.RemoveServicesFromCompose(FleetProfileName, []string{serviceName}, profileEnv) - } else { - log.WithField("service", serviceName).Info("Because we are running in development mode, the service won't be stopped") - } - - if _, err := os.Stat(sats.AgentConfigFilePath); err == nil { - os.Remove(sats.AgentConfigFilePath) - log.WithFields(log.Fields{ - "path": sats.AgentConfigFilePath, - }).Debug("Elastic Agent configuration file removed.") - } -} - -func (sats *StandAloneTestSuite) contributeSteps(s *godog.Suite) { - s.Step(`^a "([^"]*)" stand-alone agent is deployed$`, sats.aStandaloneAgentIsDeployed) - s.Step(`^there is new data in the index from agent$`, sats.thereIsNewDataInTheIndexFromAgent) - s.Step(`^the "([^"]*)" docker container is stopped$`, sats.theDockerContainerIsStopped) - s.Step(`^there is no new data in the index after agent shuts down$`, sats.thereIsNoNewDataInTheIndexAfterAgentShutsDown) -} - -func (sats *StandAloneTestSuite) aStandaloneAgentIsDeployed(image string) error { - log.Trace("Deploying an agent to Fleet") - - serviceManager := 
services.NewServiceManager() - - profileEnv["elasticAgentDockerImageSuffix"] = "" - if image != "default" { - profileEnv["elasticAgentDockerImageSuffix"] = "-" + image - } - - profileEnv["elasticAgentDockerNamespace"] = e2e.GetDockerNamespaceEnvVar() - - containerName := fmt.Sprintf("%s_%s_%d", FleetProfileName, ElasticAgentServiceName, 1) - - configurationFileURL := "https://raw.githubusercontent.com/elastic/beats/master/x-pack/elastic-agent/elastic-agent.docker.yml" - - configurationFilePath, err := e2e.DownloadFile(configurationFileURL) - if err != nil { - return err - } - sats.AgentConfigFilePath = configurationFilePath - - profileEnv["elasticAgentContainerName"] = containerName - profileEnv["elasticAgentConfigFile"] = sats.AgentConfigFilePath - profileEnv["elasticAgentTag"] = agentVersion - - err = serviceManager.AddServicesToCompose(FleetProfileName, []string{ElasticAgentServiceName}, profileEnv) - if err != nil { - log.Error("Could not deploy the elastic-agent") - return err - } - - // get container hostname once - hostname, err := getContainerHostname(containerName) - if err != nil { - return err - } - - sats.Image = image - sats.Hostname = hostname - sats.Cleanup = true - - err = sats.installTestTools(containerName) - if err != nil { - return err - } - - return nil -} - -func (sats *StandAloneTestSuite) getContainerLogs() error { - serviceManager := services.NewServiceManager() - - profile := FleetProfileName - serviceName := ElasticAgentServiceName - - composes := []string{ - profile, // profile name - serviceName, // agent service - } - err := serviceManager.RunCommand(profile, composes, []string{"logs", serviceName}, profileEnv) - if err != nil { - log.WithFields(log.Fields{ - "error": err, - "service": serviceName, - }).Error("Could not retrieve Elastic Agent logs") - - return err - } - - return nil -} - -// installTestTools we need the container name because we use the Docker Client instead of Docker Compose -// we are going to install those tools we 
use in the test framework for checking -// and verifications -func (sats *StandAloneTestSuite) installTestTools(containerName string) error { - if sats.Image != "ubi8" { - return nil - } - - cmd := []string{"microdnf", "install", "procps-ng"} - - log.WithFields(log.Fields{ - "command": cmd, - "containerName": containerName, - }).Trace("Installing test tools ") - - _, err := docker.ExecCommandIntoContainer(context.Background(), containerName, "root", cmd) - if err != nil { - log.WithFields(log.Fields{ - "command": cmd, - "containerName": containerName, - "error": err, - }).Error("Could not install test tools using the Docker client") - return err - } - - log.WithFields(log.Fields{ - "command": cmd, - "containerName": containerName, - }).Debug("Test tools installed") - - return nil -} - -func (sats *StandAloneTestSuite) thereIsNewDataInTheIndexFromAgent() error { - maxTimeout := time.Duration(timeoutFactor) * time.Minute * 2 - minimumHitsCount := 50 - - result, err := searchAgentData(sats.Hostname, sats.RuntimeDependenciesStartDate, minimumHitsCount, maxTimeout) - if err != nil { - return err - } - - log.Tracef("Search result: %v", result) - - return e2e.AssertHitsArePresent(result) -} - -func (sats *StandAloneTestSuite) theDockerContainerIsStopped(serviceName string) error { - serviceManager := services.NewServiceManager() - - err := serviceManager.RemoveServicesFromCompose(FleetProfileName, []string{serviceName}, profileEnv) - if err != nil { - return err - } - sats.AgentStoppedDate = time.Now().UTC() - - return nil -} - -func (sats *StandAloneTestSuite) thereIsNoNewDataInTheIndexAfterAgentShutsDown() error { - maxTimeout := time.Duration(30) * time.Second - minimumHitsCount := 1 - - result, err := searchAgentData(sats.Hostname, sats.AgentStoppedDate, minimumHitsCount, maxTimeout) - if err != nil { - if strings.Contains(err.Error(), "type:index_not_found_exception") { - return err - } - - log.WithFields(log.Fields{ - "error": err, - }).Info("No documents were found 
for the Agent in the index after it stopped") - return nil - } - - return e2e.AssertHitsAreNotPresent(result) -} - -func searchAgentData(hostname string, startDate time.Time, minimumHitsCount int, maxTimeout time.Duration) (e2e.SearchResult, error) { - timezone := "America/New_York" - - esQuery := map[string]interface{}{ - "version": true, - "size": 500, - "docvalue_fields": []map[string]interface{}{ - { - "field": "@timestamp", - "format": "date_time", - }, - { - "field": "system.process.cpu.start_time", - "format": "date_time", - }, - { - "field": "system.service.state_since", - "format": "date_time", - }, - }, - "_source": map[string]interface{}{ - "excludes": []map[string]interface{}{}, - }, - "query": map[string]interface{}{ - "bool": map[string]interface{}{ - "must": []map[string]interface{}{}, - "filter": []map[string]interface{}{ - { - "bool": map[string]interface{}{ - "filter": []map[string]interface{}{ - { - "bool": map[string]interface{}{ - "should": []map[string]interface{}{ - { - "match_phrase": map[string]interface{}{ - "host.name": hostname, - }, - }, - }, - "minimum_should_match": 1, - }, - }, - { - "bool": map[string]interface{}{ - "should": []map[string]interface{}{ - { - "range": map[string]interface{}{ - "@timestamp": map[string]interface{}{ - "gte": startDate, - "time_zone": timezone, - }, - }, - }, - }, - "minimum_should_match": 1, - }, - }, - }, - }, - }, - { - "range": map[string]interface{}{ - "@timestamp": map[string]interface{}{ - "gte": startDate, - "format": "strict_date_optional_time", - }, - }, - }, - }, - "should": []map[string]interface{}{}, - "must_not": []map[string]interface{}{}, - }, - }, - } - - indexName := "logs-elastic_agent-default" - - result, err := e2e.WaitForNumberOfHits(indexName, esQuery, minimumHitsCount, maxTimeout) - if err != nil { - log.WithFields(log.Fields{ - "error": err, - }).Warn(e2e.WaitForIndices()) - } - - return result, err -} From 8034058d54ea6ec4ac2546c238e8e6af9cafd775 Mon Sep 17 00:00:00 2001 From: 
=?UTF-8?q?Manuel=20de=20la=20Pe=C3=B1a?= Date: Tue, 12 Jan 2021 18:45:10 +0100 Subject: [PATCH 02/13] chore: bump 6.8 versions --- .ci/Jenkinsfile | 4 ++-- .ci/scripts/clean-docker.sh | 7 +------ .ci/scripts/functional-test.sh | 8 ++++---- .../compose/profiles/metricbeat/docker-compose.yml | 2 +- .../compose/services/metricbeat/docker-compose.yml | 2 +- e2e/README.md | 1 + e2e/_suites/metricbeat/README.md | 4 ++-- e2e/_suites/metricbeat/metricbeat_test.go | 2 +- e2e/utils.go | 14 +++++++------- 9 files changed, 20 insertions(+), 24 deletions(-) diff --git a/.ci/Jenkinsfile b/.ci/Jenkinsfile index 3693a56253..5e39345066 100644 --- a/.ci/Jenkinsfile +++ b/.ci/Jenkinsfile @@ -40,8 +40,8 @@ pipeline { choice(name: 'LOG_LEVEL', choices: ['DEBUG', 'INFO'], description: 'Log level to be used') choice(name: 'TIMEOUT_FACTOR', choices: ['3', '5', '7', '11'], description: 'Max number of minutes for timeout backoff strategies') string(name: 'FLEET_STACK_VERSION', defaultValue: '8.0.0-SNAPSHOT', description: 'SemVer version of the stack to be used for Fleet tests.') - string(name: 'METRICBEAT_STACK_VERSION', defaultValue: '8.0.0-SNAPSHOT', description: 'SemVer version of the stack to be used for Metricbeat tests.') - string(name: 'METRICBEAT_VERSION', defaultValue: '8.0.0-SNAPSHOT', description: 'SemVer version of the metricbeat to be used.') + string(name: 'METRICBEAT_STACK_VERSION', defaultValue: '6.8-SNAPSHOT', description: 'SemVer version of the stack to be used for Metricbeat tests.') + string(name: 'METRICBEAT_VERSION', defaultValue: '6.8-SNAPSHOT', description: 'SemVer version of the metricbeat to be used.') string(name: 'HELM_CHART_VERSION', defaultValue: '7.10.0', description: 'SemVer version of Helm chart to be used.') string(name: 'HELM_VERSION', defaultValue: '3.4.1', description: 'SemVer version of Helm to be used.') string(name: 'HELM_KIND_VERSION', defaultValue: '0.8.1', description: 'SemVer version of Kind to be used.') diff --git a/.ci/scripts/clean-docker.sh 
b/.ci/scripts/clean-docker.sh index b8f7e81daf..14e766005e 100755 --- a/.ci/scripts/clean-docker.sh +++ b/.ci/scripts/clean-docker.sh @@ -9,17 +9,12 @@ set -euxo pipefail # Build and test the app using the install and test make goals. # -readonly VERSION="8.0.0-SNAPSHOT" +readonly VERSION="6.8-SNAPSHOT" main() { # refresh docker images cat <.tmp_images -docker.elastic.co/beats/elastic-agent:${VERSION} -docker.elastic.co/beats/elastic-agent-ubi8:${VERSION} docker.elastic.co/elasticsearch/elasticsearch:${VERSION} -docker.elastic.co/kibana/kibana:${VERSION} -docker.elastic.co/observability-ci/elastic-agent:${VERSION} -docker.elastic.co/observability-ci/elastic-agent-ubi8:${VERSION} docker.elastic.co/observability-ci/elasticsearch:${VERSION} docker.elastic.co/observability-ci/kibana:${VERSION} EOF diff --git a/.ci/scripts/functional-test.sh b/.ci/scripts/functional-test.sh index 4078fcc0a8..0f1a1ca44e 100755 --- a/.ci/scripts/functional-test.sh +++ b/.ci/scripts/functional-test.sh @@ -12,14 +12,14 @@ set -euxo pipefail # Parameters: # - SUITE - that's the suite to be tested. Default '' which means all of them. # - TAGS - that's the tags to be tested. Default '' which means all of them. -# - STACK_VERSION - that's the version of the stack to be tested. Default '8.0.0-SNAPSHOT'. -# - METRICBEAT_VERSION - that's the version of the metricbeat to be tested. Default '8.0.0-SNAPSHOT'. +# - STACK_VERSION - that's the version of the stack to be tested. Default '6.8-SNAPSHOT'. +# - METRICBEAT_VERSION - that's the version of the metricbeat to be tested. Default '6.8-SNAPSHOT'. 
# SUITE=${1:-''} TAGS=${2:-''} -STACK_VERSION=${3:-'8.0.0-SNAPSHOT'} -METRICBEAT_VERSION=${4:-'8.0.0-SNAPSHOT'} +STACK_VERSION=${3:-'6.8-SNAPSHOT'} +METRICBEAT_VERSION=${4:-'6.8-SNAPSHOT'} TARGET_OS=${GOOS:-linux} TARGET_ARCH=${GOARCH:-amd64} diff --git a/cli/config/compose/profiles/metricbeat/docker-compose.yml b/cli/config/compose/profiles/metricbeat/docker-compose.yml index 4aeda9a80e..36b55d3136 100644 --- a/cli/config/compose/profiles/metricbeat/docker-compose.yml +++ b/cli/config/compose/profiles/metricbeat/docker-compose.yml @@ -9,6 +9,6 @@ services: - xpack.monitoring.collection.enabled=true - ELASTIC_USERNAME=elastic - ELASTIC_PASSWORD=changeme - image: "docker.elastic.co/observability-ci/elasticsearch:${stackVersion:-8.0.0-SNAPSHOT}" + image: "docker.elastic.co/observability-ci/elasticsearch:${stackVersion:-6.8-SNAPSHOT}" ports: - "9200:9200" diff --git a/cli/config/compose/services/metricbeat/docker-compose.yml b/cli/config/compose/services/metricbeat/docker-compose.yml index 1d8a3365c7..4e72618955 100644 --- a/cli/config/compose/services/metricbeat/docker-compose.yml +++ b/cli/config/compose/services/metricbeat/docker-compose.yml @@ -14,7 +14,7 @@ services: ] environment: - BEAT_STRICT_PERMS=${beatStricPerms:-false} - image: "docker.elastic.co/${metricbeatDockerNamespace:-beats}/metricbeat:${metricbeatTag:-8.0.0-SNAPSHOT}" + image: "docker.elastic.co/${metricbeatDockerNamespace:-beats}/metricbeat:${metricbeatTag:-6.8-SNAPSHOT}" labels: co.elastic.logs/module: "${serviceName}" volumes: diff --git a/e2e/README.md b/e2e/README.md index 9efb0ab9e3..f2d441e925 100644 --- a/e2e/README.md +++ b/e2e/README.md @@ -103,6 +103,7 @@ In order to debug the `godog` tests, 1) you must have the `runner_test.go` file ## Regression testing We have built the project and the CI job in a manner that it is possible to override different parameters about projects versions, so that we can set i.e. the version of the Elastic Stack to be used, or the version of the Elastic Agent. 
There also exist maintenance branches where we set the specific versions used for the tests: +- **6.8.x**: will use `6.8.x` alias for the Elastic Stack and Metricbeat - **7.10.x**: will use `7.10.x` alias for the Elastic Stack, the Agent and Metricbeat - **7.11.x**: will use `7.11.x` alias for the Elastic Stack, the Agent and Metricbeat - **7.x**: will use `7.x` alias for the Elastic Stack, the Agent and Metricbeat diff --git a/e2e/_suites/metricbeat/README.md b/e2e/_suites/metricbeat/README.md index acb8dd5718..7ba2cba186 100644 --- a/e2e/_suites/metricbeat/README.md +++ b/e2e/_suites/metricbeat/README.md @@ -45,8 +45,8 @@ This is an example of the optional configuration: ```shell # There should be a Docker image for the runtime dependencies (elasticsearch, kibana, package registry) - export STACK_VERSION="8.0.0-SNAPSHOT" - export METRICBEAT_VERSION="8.0.0-SNAPSHOT" + export STACK_VERSION="6.8-SNAPSHOT" + export METRICBEAT_VERSION="6.8-SNAPSHOT" # or # This environment variable will use the snapshots produced by Beats CI export BEATS_USE_CI_SNAPSHOTS="true" diff --git a/e2e/_suites/metricbeat/metricbeat_test.go b/e2e/_suites/metricbeat/metricbeat_test.go index 273f1d94a6..d7e1584179 100644 --- a/e2e/_suites/metricbeat/metricbeat_test.go +++ b/e2e/_suites/metricbeat/metricbeat_test.go @@ -27,7 +27,7 @@ import ( // It can be overriden by the DEVELOPER_MODE env var var developerMode = false -const metricbeatVersionBase = "8.0.0-SNAPSHOT" +const metricbeatVersionBase = "6.8-SNAPSHOT" // metricbeatVersion is the version of the metricbeat to use // It can be overriden by METRICBEAT_VERSION env var diff --git a/e2e/utils.go b/e2e/utils.go index e6b994981c..b972b3104b 100644 --- a/e2e/utils.go +++ b/e2e/utils.go @@ -54,7 +54,7 @@ func GetExponentialBackOff(elapsedTime time.Duration) *backoff.ExponentialBackOf // GetElasticArtifactVersion returns the current version: // 1. 
Elastic's artifact repository, building the JSON path query based
 // If the version is a PR, then it will return the version without checking the artifacts API
-// i.e. GetElasticArtifactVersion("8.0.0-SNAPSHOT")
+// i.e. GetElasticArtifactVersion("6.8-SNAPSHOT")
 // i.e. GetElasticArtifactVersion("pr-22000")
 func GetElasticArtifactVersion(version string) string {
 	if strings.HasPrefix(strings.ToLower(version), "pr-") {
@@ -132,9 +132,9 @@
 // on the desired OS, architecture and file extension:
 // 1. Observability CI Storage bucket
 // 2. Elastic's artifact repository, building the JSON path query based
-// i.e. GetElasticArtifactURL("elastic-agent", "8.0.0-SNAPSHOT", "linux", "x86_64", "tar.gz")
-// i.e. GetElasticArtifactURL("elastic-agent", "8.0.0-SNAPSHOT", "x86_64", "rpm")
-// i.e. GetElasticArtifactURL("elastic-agent", "8.0.0-SNAPSHOT", "amd64", "deb")
+// i.e. GetElasticArtifactURL("elastic-agent", "6.8-SNAPSHOT", "linux", "x86_64", "tar.gz")
+// i.e. GetElasticArtifactURL("elastic-agent", "6.8-SNAPSHOT", "x86_64", "rpm")
+// i.e.
GetElasticArtifactURL("elastic-agent", "6.8-SNAPSHOT", "amd64", "deb") func GetElasticArtifactURL(artifact string, version string, operativeSystem string, arch string, extension string) (string, error) { exp := GetExponentialBackOff(time.Minute) @@ -193,11 +193,11 @@ func GetElasticArtifactURL(artifact string, version string, operativeSystem stri return "", err } - // elastic-agent-8.0.0-SNAPSHOT-linux-x86_64.tar.gz + // elastic-agent-6.8-SNAPSHOT-linux-x86_64.tar.gz artifactPath := fmt.Sprintf("%s-%s-%s-%s.%s", artifact, version, operativeSystem, arch, extension) if extension == "deb" || extension == "rpm" { - // elastic-agent-8.0.0-SNAPSHOT-x86_64.rpm - // elastic-agent-8.0.0-SNAPSHOT-amd64.deb + // elastic-agent-6.8-SNAPSHOT-x86_64.rpm + // elastic-agent-6.8-SNAPSHOT-amd64.deb artifactPath = fmt.Sprintf("%s-%s-%s.%s", artifact, version, arch, extension) } From 161bc01b8b5840a8d75d5b5245d9ba55998c8e69 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Manuel=20de=20la=20Pe=C3=B1a?= Date: Tue, 12 Jan 2021 19:38:47 +0100 Subject: [PATCH 03/13] fix: use principal namespace --- cli/config/compose/profiles/metricbeat/docker-compose.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cli/config/compose/profiles/metricbeat/docker-compose.yml b/cli/config/compose/profiles/metricbeat/docker-compose.yml index 36b55d3136..4387055def 100644 --- a/cli/config/compose/profiles/metricbeat/docker-compose.yml +++ b/cli/config/compose/profiles/metricbeat/docker-compose.yml @@ -9,6 +9,6 @@ services: - xpack.monitoring.collection.enabled=true - ELASTIC_USERNAME=elastic - ELASTIC_PASSWORD=changeme - image: "docker.elastic.co/observability-ci/elasticsearch:${stackVersion:-6.8-SNAPSHOT}" + image: "docker.elastic.co/elasticsearch/elasticsearch:${stackVersion:-6.8-SNAPSHOT}" ports: - "9200:9200" From ee8e79f3b33eeec81b16506584f98d7c853ff8e8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Manuel=20de=20la=20Pe=C3=B1a?= Date: Tue, 12 Jan 2021 19:55:18 +0100 Subject: [PATCH 04/13] fix: 
clone beats in the proper branch --- e2e/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/e2e/Makefile b/e2e/Makefile index e9110e5471..4f44ab2b2d 100644 --- a/e2e/Makefile +++ b/e2e/Makefile @@ -83,4 +83,4 @@ notice: .PHONY: sync-integrations sync-integrations: - OP_LOG_LEVEL=${LOG_LEVEL} ./op sync integrations --delete + OP_LOG_LEVEL=${LOG_LEVEL} ./op sync integrations --delete --remote "elastic:6.8" From 035374e9f54958048678242e143b19aa2664127b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Manuel=20de=20la=20Pe=C3=B1a?= Date: Wed, 13 Jan 2021 21:49:38 +0100 Subject: [PATCH 05/13] chore: use compose files from 7.x --- e2e/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/e2e/Makefile b/e2e/Makefile index 4f44ab2b2d..748f1fb480 100644 --- a/e2e/Makefile +++ b/e2e/Makefile @@ -83,4 +83,4 @@ notice: .PHONY: sync-integrations sync-integrations: - OP_LOG_LEVEL=${LOG_LEVEL} ./op sync integrations --delete --remote "elastic:6.8" + OP_LOG_LEVEL=${LOG_LEVEL} ./op sync integrations --delete --remote "elastic:7.x" From 65e123dda4a993a76347dc56db78f857438985be Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Manuel=20de=20la=20Pe=C3=B1a?= Date: Thu, 14 Jan 2021 17:52:46 +0100 Subject: [PATCH 06/13] chore: use a fixed version, the first including supported-versions files --- e2e/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/e2e/Makefile b/e2e/Makefile index 748f1fb480..abc6bd1956 100644 --- a/e2e/Makefile +++ b/e2e/Makefile @@ -83,4 +83,4 @@ notice: .PHONY: sync-integrations sync-integrations: - OP_LOG_LEVEL=${LOG_LEVEL} ./op sync integrations --delete --remote "elastic:7.x" + OP_LOG_LEVEL=${LOG_LEVEL} ./op sync integrations --delete --remote "elastic:7.6" From 1c7b6ee808924af280c8b8c32c1f68a9c8bee79e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Manuel=20de=20la=20Pe=C3=B1a?= Date: Mon, 18 Jan 2021 08:40:24 +0100 Subject: [PATCH 07/13] chore: use Beats 7.7 branch for fetching integrations --- e2e/Makefile | 2 +- 1 
file changed, 1 insertion(+), 1 deletion(-) diff --git a/e2e/Makefile b/e2e/Makefile index a164c39cba..2d420456c6 100644 --- a/e2e/Makefile +++ b/e2e/Makefile @@ -85,7 +85,7 @@ notice: .PHONY: sync-integrations sync-integrations: - OP_LOG_LEVEL=${LOG_LEVEL} ./op sync integrations --delete --remote "elastic:7.6" + OP_LOG_LEVEL=${LOG_LEVEL} ./op sync integrations --delete --remote "elastic:7.7" .PHONY: unit-test unit-test: From 438745fd21a2d9fcf642a4c13b5b9423db28b900 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Manuel=20de=20la=20Pe=C3=B1a?= Date: Mon, 18 Jan 2021 20:51:59 +0100 Subject: [PATCH 08/13] chore: enable ILM for metricbeat in 6.8 --- cli/config/compose/services/metricbeat/docker-compose.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/cli/config/compose/services/metricbeat/docker-compose.yml b/cli/config/compose/services/metricbeat/docker-compose.yml index 4e72618955..2e58c83c01 100644 --- a/cli/config/compose/services/metricbeat/docker-compose.yml +++ b/cli/config/compose/services/metricbeat/docker-compose.yml @@ -4,7 +4,8 @@ services: command: [ "metricbeat", "-e", "-E", "logging.level=${logLevel}", - "-E", "setup.ilm.rollover_alias=${indexName}", + "-E", "output.elasticsearch.ilm.enabled=true", + "-E", "output.elasticsearch.ilm.rollover_alias=${indexName}", "-E", "output.elasticsearch.hosts=http://elasticsearch:9200", "-E", "output.elasticsearch.password=changeme", "-E", "output.elasticsearch.username=elastic", From 44cf5792f433806981b769f11a89c813eb244793 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Manuel=20de=20la=20Pe=C3=B1a?= Date: Mon, 18 Jan 2021 20:52:21 +0100 Subject: [PATCH 09/13] fix: use proper metricbeat's configuration file path --- cli/config/compose/services/metricbeat/docker-compose.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cli/config/compose/services/metricbeat/docker-compose.yml b/cli/config/compose/services/metricbeat/docker-compose.yml index 2e58c83c01..bc01c2c16c 100644 --- 
a/cli/config/compose/services/metricbeat/docker-compose.yml +++ b/cli/config/compose/services/metricbeat/docker-compose.yml @@ -19,4 +19,4 @@ services: labels: co.elastic.logs/module: "${serviceName}" volumes: - - "${metricbeatConfigFile}:/usr/share/metricbeat/metricbeat.yml" + - "${metricbeatConfigFile}:/etc/metricbeat/metricbeat.yml" From cfa07cb6070263832d46a4472f46a77d78c04301 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Manuel=20de=20la=20Pe=C3=B1a?= Date: Mon, 18 Jan 2021 21:03:08 +0100 Subject: [PATCH 10/13] fix: hits.total is float64 in 6.8 --- e2e/elasticsearch.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/e2e/elasticsearch.go b/e2e/elasticsearch.go index d3791251db..e1f43e6c6f 100644 --- a/e2e/elasticsearch.go +++ b/e2e/elasticsearch.go @@ -205,7 +205,7 @@ func search(indexName string, query map[string]interface{}) (SearchResult, error log.WithFields(log.Fields{ "status": res.Status(), - "hits": int(result["hits"].(map[string]interface{})["total"].(map[string]interface{})["value"].(float64)), + "hits": int(result["hits"].(map[string]interface{})["total"].(float64)), "took": int(result["took"].(float64)), }).Debug("Response information") From a9add26540a29122df24b717b055842f07098ede Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Manuel=20de=20la=20Pe=C3=B1a?= Date: Mon, 18 Jan 2021 21:40:53 +0100 Subject: [PATCH 11/13] fix: use system.network as the event dataset in ES queries --- e2e/_suites/metricbeat/metricbeat_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/e2e/_suites/metricbeat/metricbeat_test.go b/e2e/_suites/metricbeat/metricbeat_test.go index 39b504d118..c50809c7d8 100644 --- a/e2e/_suites/metricbeat/metricbeat_test.go +++ b/e2e/_suites/metricbeat/metricbeat_test.go @@ -444,7 +444,7 @@ func (mts *MetricbeatTestSuite) thereAreEventsInTheIndex() error { "must": []map[string]interface{}{ { "match": map[string]interface{}{ - "event.module": mts.Query.EventModule, + "event.dataset": "system.network", }, }, }, 
@@ -480,7 +480,7 @@ func (mts *MetricbeatTestSuite) thereAreNoErrorsInTheIndex() error { "must": []map[string]interface{}{ { "match": map[string]interface{}{ - "event.module": mts.Query.EventModule, + "event.dataset": "system.network", }, }, }, From ddb7d267bea2894059e5b4ee817e1f29f296cb44 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Manuel=20de=20la=20Pe=C3=B1a?= Date: Tue, 26 Jan 2021 11:59:29 +0100 Subject: [PATCH 12/13] chore: remove old file after merge --- e2e/_suites/fleet/services_test.go | 196 ----------------------------- 1 file changed, 196 deletions(-) delete mode 100644 e2e/_suites/fleet/services_test.go diff --git a/e2e/_suites/fleet/services_test.go b/e2e/_suites/fleet/services_test.go deleted file mode 100644 index 615e6f23c9..0000000000 --- a/e2e/_suites/fleet/services_test.go +++ /dev/null @@ -1,196 +0,0 @@ -package main - -import ( - "os" - "testing" - - "github.com/stretchr/testify/assert" -) - -var testVersion = agentVersionBase - -func TestGetGCPBucketCoordinates_Commits(t *testing.T) { - artifact := "elastic-agent" - version := testVersion - OS := "linux" - - t.Run("Fetching commits bucket for RPM package", func(t *testing.T) { - defer os.Unsetenv("GITHUB_CHECK_SHA1") - os.Setenv("GITHUB_CHECK_SHA1", "0123456789") - - arch := "x86_64" - extension := "rpm" - fileName := "elastic-agent-" + testVersion + "-x86_64.rpm" - - newFileName, bucket, prefix, object := getGCPBucketCoordinates(fileName, artifact, version, OS, arch, extension) - assert.Equal(t, bucket, "beats-ci-artifacts") - assert.Equal(t, prefix, "commits/0123456789") - assert.Equal(t, newFileName, "elastic-agent-"+testVersion+"-x86_64.rpm") - assert.Equal(t, object, "elastic-agent/elastic-agent-"+testVersion+"-x86_64.rpm") - }) - - t.Run("Fetching commits bucket for DEB package", func(t *testing.T) { - defer os.Unsetenv("GITHUB_CHECK_SHA1") - os.Setenv("GITHUB_CHECK_SHA1", "0123456789") - - arch := "amd64" - extension := "deb" - fileName := "elastic-agent-" + testVersion + 
"-amd64.deb" - - newFileName, bucket, prefix, object := getGCPBucketCoordinates(fileName, artifact, version, OS, arch, extension) - assert.Equal(t, bucket, "beats-ci-artifacts") - assert.Equal(t, prefix, "commits/0123456789") - assert.Equal(t, newFileName, "elastic-agent-"+testVersion+"-amd64.deb") - assert.Equal(t, object, "elastic-agent/elastic-agent-"+testVersion+"-amd64.deb") - }) - - t.Run("Fetching commits bucket for TAR package adds OS to fileName and object", func(t *testing.T) { - defer os.Unsetenv("GITHUB_CHECK_SHA1") - os.Setenv("GITHUB_CHECK_SHA1", "0123456789") - - arch := "x86_64" - extension := "tar.gz" - fileName := "elastic-agent-" + testVersion + "-linux-x86_64.tar.gz" - - newFileName, bucket, prefix, object := getGCPBucketCoordinates(fileName, artifact, version, OS, arch, extension) - assert.Equal(t, bucket, "beats-ci-artifacts") - assert.Equal(t, prefix, "commits/0123456789") - assert.Equal(t, newFileName, "elastic-agent-"+testVersion+"-linux-x86_64.tar.gz") - assert.Equal(t, object, "elastic-agent/elastic-agent-"+testVersion+"-linux-x86_64.tar.gz") - }) -} - -func TestGetGCPBucketCoordinates_CommitsForAPullRequest(t *testing.T) { - artifact := "elastic-agent" - version := "pr-23456" - OS := "linux" - - t.Run("Fetching commits bucket for RPM package", func(t *testing.T) { - defer os.Unsetenv("GITHUB_CHECK_SHA1") - os.Setenv("GITHUB_CHECK_SHA1", "0123456789") - - arch := "x86_64" - extension := "rpm" - fileName := "elastic-agent-" + testVersion + "-x86_64.rpm" - - newFileName, bucket, prefix, object := getGCPBucketCoordinates(fileName, artifact, version, OS, arch, extension) - assert.Equal(t, bucket, "beats-ci-artifacts") - assert.Equal(t, prefix, "pull-requests/pr-23456") - assert.Equal(t, newFileName, "elastic-agent-"+testVersion+"-x86_64.rpm") - assert.Equal(t, object, "elastic-agent/elastic-agent-"+testVersion+"-x86_64.rpm") - }) - - t.Run("Fetching commits bucket for DEB package", func(t *testing.T) { - defer os.Unsetenv("GITHUB_CHECK_SHA1") 
- os.Setenv("GITHUB_CHECK_SHA1", "0123456789") - - arch := "amd64" - extension := "deb" - fileName := "elastic-agent-" + testVersion + "-amd64.deb" - - newFileName, bucket, prefix, object := getGCPBucketCoordinates(fileName, artifact, version, OS, arch, extension) - assert.Equal(t, bucket, "beats-ci-artifacts") - assert.Equal(t, prefix, "pull-requests/pr-23456") - assert.Equal(t, newFileName, "elastic-agent-"+testVersion+"-amd64.deb") - assert.Equal(t, object, "elastic-agent/elastic-agent-"+testVersion+"-amd64.deb") - }) - - t.Run("Fetching commits bucket for TAR package adds OS to fileName and object", func(t *testing.T) { - defer os.Unsetenv("GITHUB_CHECK_SHA1") - os.Setenv("GITHUB_CHECK_SHA1", "0123456789") - - arch := "x86_64" - extension := "tar.gz" - fileName := "elastic-agent-" + testVersion + "-linux-x86_64.tar.gz" - - newFileName, bucket, prefix, object := getGCPBucketCoordinates(fileName, artifact, version, OS, arch, extension) - assert.Equal(t, bucket, "beats-ci-artifacts") - assert.Equal(t, prefix, "pull-requests/pr-23456") - assert.Equal(t, newFileName, "elastic-agent-"+testVersion+"-linux-x86_64.tar.gz") - assert.Equal(t, object, "elastic-agent/elastic-agent-"+testVersion+"-linux-x86_64.tar.gz") - }) -} - -func TestGetGCPBucketCoordinates_PullRequests(t *testing.T) { - artifact := "elastic-agent" - version := "pr-23456" - OS := "linux" - - t.Run("Fetching commits bucket for RPM package", func(t *testing.T) { - arch := "x86_64" - extension := "rpm" - fileName := "elastic-agent-" + testVersion + "-x86_64.rpm" - - newFileName, bucket, prefix, object := getGCPBucketCoordinates(fileName, artifact, version, OS, arch, extension) - assert.Equal(t, newFileName, "elastic-agent-"+testVersion+"-x86_64.rpm") - assert.Equal(t, bucket, "beats-ci-artifacts") - assert.Equal(t, prefix, "pull-requests/pr-23456") - assert.Equal(t, object, "elastic-agent/elastic-agent-"+testVersion+"-x86_64.rpm") - }) - - t.Run("Fetching commits bucket for DEB package", func(t *testing.T) 
{ - arch := "amd64" - extension := "deb" - fileName := "elastic-agent-" + testVersion + "-amd64.deb" - - newFileName, bucket, prefix, object := getGCPBucketCoordinates(fileName, artifact, version, OS, arch, extension) - assert.Equal(t, newFileName, "elastic-agent-"+testVersion+"-amd64.deb") - assert.Equal(t, bucket, "beats-ci-artifacts") - assert.Equal(t, prefix, "pull-requests/pr-23456") - assert.Equal(t, object, "elastic-agent/elastic-agent-"+testVersion+"-amd64.deb") - }) - - t.Run("Fetching commits bucket for TAR package adds OS to fileName and object", func(t *testing.T) { - arch := "x86_64" - extension := "tar.gz" - fileName := "elastic-agent-" + testVersion + "-linux-x86_64.tar.gz" - - newFileName, bucket, prefix, object := getGCPBucketCoordinates(fileName, artifact, version, OS, arch, extension) - assert.Equal(t, newFileName, "elastic-agent-"+testVersion+"-linux-x86_64.tar.gz") - assert.Equal(t, bucket, "beats-ci-artifacts") - assert.Equal(t, prefix, "pull-requests/pr-23456") - assert.Equal(t, object, "elastic-agent/elastic-agent-"+testVersion+"-linux-x86_64.tar.gz") - }) -} - -func TestGetGCPBucketCoordinates_Snapshots(t *testing.T) { - artifact := "elastic-agent" - version := testVersion - OS := "linux" - - t.Run("Fetching commits bucket for RPM package", func(t *testing.T) { - arch := "x86_64" - extension := "rpm" - fileName := "elastic-agent-" + testVersion + "-x86_64.rpm" - - newFileName, bucket, prefix, object := getGCPBucketCoordinates(fileName, artifact, version, OS, arch, extension) - assert.Equal(t, bucket, "beats-ci-artifacts") - assert.Equal(t, prefix, "snapshots/elastic-agent") - assert.Equal(t, newFileName, "elastic-agent-"+testVersion+"-x86_64.rpm") - assert.Equal(t, object, "elastic-agent-"+testVersion+"-x86_64.rpm") - }) - - t.Run("Fetching commits bucket for DEB package", func(t *testing.T) { - arch := "amd64" - extension := "deb" - fileName := "elastic-agent-" + testVersion + "-amd64.deb" - - newFileName, bucket, prefix, object := 
getGCPBucketCoordinates(fileName, artifact, version, OS, arch, extension) - assert.Equal(t, bucket, "beats-ci-artifacts") - assert.Equal(t, prefix, "snapshots/elastic-agent") - assert.Equal(t, newFileName, "elastic-agent-"+testVersion+"-amd64.deb") - assert.Equal(t, object, "elastic-agent-"+testVersion+"-amd64.deb") - }) - - t.Run("Fetching commits bucket for TAR package adds OS to fileName and object", func(t *testing.T) { - arch := "x86_64" - extension := "tar.gz" - fileName := "elastic-agent-" + testVersion + "-linux-x86_64.tar.gz" - - newFileName, bucket, prefix, object := getGCPBucketCoordinates(fileName, artifact, version, OS, arch, extension) - assert.Equal(t, bucket, "beats-ci-artifacts") - assert.Equal(t, prefix, "snapshots/elastic-agent") - assert.Equal(t, newFileName, "elastic-agent-"+testVersion+"-linux-x86_64.tar.gz") - assert.Equal(t, object, "elastic-agent-"+testVersion+"-linux-x86_64.tar.gz") - }) -} From 0d79fa995aa9b3a7e38d95cf36b189e9584bfb9f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Manuel=20de=20la=20Pe=C3=B1a?= Date: Mon, 1 Feb 2021 12:59:31 +0100 Subject: [PATCH 13/13] Revert "fix: use system.network as the event dataset in ES queries" This reverts commit a9add26540a29122df24b717b055842f07098ede. 
--- e2e/_suites/metricbeat/metricbeat_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/e2e/_suites/metricbeat/metricbeat_test.go b/e2e/_suites/metricbeat/metricbeat_test.go index 679e9fd018..443a7116ef 100644 --- a/e2e/_suites/metricbeat/metricbeat_test.go +++ b/e2e/_suites/metricbeat/metricbeat_test.go @@ -450,7 +450,7 @@ func (mts *MetricbeatTestSuite) thereAreEventsInTheIndex() error { "must": []map[string]interface{}{ { "match": map[string]interface{}{ - "event.dataset": "system.network", + "event.module": mts.Query.EventModule, }, }, }, @@ -486,7 +486,7 @@ func (mts *MetricbeatTestSuite) thereAreNoErrorsInTheIndex() error { "must": []map[string]interface{}{ { "match": map[string]interface{}{ - "event.dataset": "system.network", + "event.module": mts.Query.EventModule, }, }, },